.PHONY: quality style test docs
check_dirs := src tests examples docs scripts docker
# Check that source code meets quality standards
# this target runs checks on all files
quality:
ruff check $(check_dirs)
ruff format --check $(check_dirs)
doc-builder style src/peft tests docs/source --max_len 119 --check_only
# Format source code automatically and check if there are any problems left that need manual fixing
style:
ruff check --fix $(check_dirs)
ruff format $(check_dirs)
doc-builder style src/peft tests docs/source --max_len 119
test:
python -m pytest -n 3 tests/ $(if $(IS_GITHUB_CI),--report-log "ci_tests.log",)
tests_examples_multi_gpu:
python -m pytest -m multi_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)
tests_examples_single_gpu:
python -m pytest -m single_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)
tests_core_multi_gpu:
python -m pytest -m multi_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)
tests_core_single_gpu:
python -m pytest -m single_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)
tests_common_gpu:
python -m pytest tests/test_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_decoder.log",)
python -m pytest tests/test_encoder_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_encoder_decoder.log",)
python -m pytest tests/test_gptqmodel.py $(if $(IS_GITHUB_CI),--report-log "gptqmodel_gpu.log",)
tests_examples_multi_gpu_bnb:
python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)
tests_examples_single_gpu_bnb:
python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)
tests_core_multi_gpu_bnb:
python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)
tests_core_single_gpu_bnb:
python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)
tests_gpu_bnb_regression:
python -m pytest tests/bnb/test_bnb_regression.py $(if $(IS_GITHUB_CI),--report-log "bnb_regression_gpu.log",)
# For testing transformers tests for bnb runners
transformers_tests:
RUN_SLOW=1 python -m pytest transformers-clone/tests/quantization/bnb $(if $(IS_GITHUB_CI),--report-log "transformers_tests.log",)
tests_regression:
python -m pytest -s --regression tests/regression/ $(if $(IS_GITHUB_CI),--report-log "regression_tests.log",)
tests_torch_compile:
python -m pytest tests/test_torch_compile.py $(if $(IS_GITHUB_CI),--report-log "compile_tests.log",)
| peft/Makefile/0 | {
"file_path": "peft/Makefile",
"repo_id": "peft",
"token_count": 1062
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# AutoPeftModels
The `AutoPeftModel` classes load the appropriate PEFT model for the task type by automatically inferring it from the configuration file. They are designed to quickly and easily load a PEFT model in a single line of code without having to worry about which exact model class you need or manually loading a [`PeftConfig`].
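For example, here's a minimal sketch of loading an adapter in a single line (the adapter id `peft-internal-testing/opt-350m-lora` is one example LoRA adapter on the Hub; any PEFT adapter for a causal language model works the same way):
```py
from peft import AutoPeftModelForCausalLM

# the base model is read from the adapter's configuration file and loaded automatically
model = AutoPeftModelForCausalLM.from_pretrained("peft-internal-testing/opt-350m-lora")
```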
## AutoPeftModel
[[autodoc]] auto.AutoPeftModel
- from_pretrained
## AutoPeftModelForCausalLM
[[autodoc]] auto.AutoPeftModelForCausalLM
## AutoPeftModelForSeq2SeqLM
[[autodoc]] auto.AutoPeftModelForSeq2SeqLM
## AutoPeftModelForSequenceClassification
[[autodoc]] auto.AutoPeftModelForSequenceClassification
## AutoPeftModelForTokenClassification
[[autodoc]] auto.AutoPeftModelForTokenClassification
## AutoPeftModelForQuestionAnswering
[[autodoc]] auto.AutoPeftModelForQuestionAnswering
## AutoPeftModelForFeatureExtraction
[[autodoc]] auto.AutoPeftModelForFeatureExtraction
| peft/docs/source/package_reference/auto_class.md/0 | {
"file_path": "peft/docs/source/package_reference/auto_class.md",
"repo_id": "peft",
"token_count": 470
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# PEFT integrations
PEFT's practical benefits extend to other Hugging Face libraries like [Diffusers](https://hf.co/docs/diffusers) and [Transformers](https://hf.co/docs/transformers). One of the main benefits of PEFT is that an adapter file generated by a PEFT method is a lot smaller than the original model, which makes it super easy to manage and use multiple adapters. You can use one pretrained base model for multiple tasks by simply loading a new adapter finetuned for the task you're solving. Or you can combine multiple adapters with a text-to-image diffusion model to create new effects.
This tutorial will show you how PEFT can help you manage adapters in Diffusers and Transformers.
## Diffusers
Diffusers is a generative AI library for creating images and videos from text or images with diffusion models. LoRA is an especially popular training method for diffusion models because you can very quickly train and share diffusion models to generate images in new styles. To make it easier to use and try multiple LoRA models, Diffusers uses the PEFT library to help manage different adapters for inference.
For example, load a base model and then load the [artificialguybr/3DRedmond-V1](https://huggingface.co/artificialguybr/3DRedmond-V1) adapter for inference with the [`load_lora_weights`](https://huggingface.co/docs/diffusers/v0.24.0/en/api/loaders/lora#diffusers.loaders.LoraLoaderMixin.load_lora_weights) method. The `adapter_name` argument in the loading method is enabled by PEFT and allows you to set a name for the adapter so it is easier to reference.
```py
import torch
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
"peft-internal-testing/artificialguybr__3DRedmond-V1",
weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors",
adapter_name="3d"
)
image = pipeline("sushi rolls shaped like kawaii cat faces").images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers.png"/>
</div>
Now let's try another cool LoRA model, [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora). All you need to do is load and name this new adapter with `adapter_name`, and use the [`set_adapters`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.set_adapters) method to set it as the currently active adapter.
```py
pipeline.load_lora_weights(
"ostris/super-cereal-sdxl-lora",
weight_name="cereal_box_sdxl_v1.safetensors",
adapter_name="cereal"
)
pipeline.set_adapters("cereal")
image = pipeline("sushi rolls shaped like kawaii cat faces").images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers-2.png"/>
</div>
Finally, you can call the [`disable_lora`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.disable_lora) method to restore the base model.
```py
pipeline.disable_lora()
```
Learn more about how PEFT supports Diffusers in the [Inference with PEFT](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference) tutorial.
## Transformers
🤗 [Transformers](https://hf.co/docs/transformers) is a collection of pretrained models for all types of tasks in all modalities. You can load these models for training or inference. Many of the models are large language models (LLMs), so it makes sense to integrate PEFT with Transformers to manage and train adapters.
Load a base pretrained model to train.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```
Next, add an adapter configuration to specify how to adapt the model parameters. Call the [`~PeftModel.add_adapter`] method to add the configuration to the base model.
```py
from peft import LoraConfig
peft_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=64,
bias="none",
task_type="CAUSAL_LM"
)
model.add_adapter(peft_config)
```
Now you can train the model with Transformers' [`~transformers.Trainer`] class or whichever training framework you prefer.
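For example, here's a minimal sketch of a `Trainer` run (the hyperparameters are illustrative, and `train_dataset` is assumed to be a dataset you have already tokenized):
```py
from transformers import Trainer, TrainingArguments

training_args = TrainingArguments(
    output_dir="opt-350m-lora",
    per_device_train_batch_size=4,
    learning_rate=2e-4,
    num_train_epochs=1,
)
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
trainer.train()
```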
To use the newly trained model for inference, the [`~transformers.AutoModel`] class uses PEFT on the backend to load the adapter weights and configuration file into a base pretrained model.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("peft-internal-testing/opt-350m-lora")
```
Alternatively, you can use the Transformers [Pipelines](https://huggingface.co/docs/transformers/en/main_classes/pipelines) API to load the model and conveniently run inference:
```py
from transformers import pipeline
model = pipeline("text-generation", "peft-internal-testing/opt-350m-lora")
print(model("Hello World"))
```
If you're interested in comparing or using more than one adapter, you can call the [`~PeftModel.add_adapter`] method to add the adapter configuration to the base model. The only requirement is that the adapter type must be the same (for example, you can't mix a LoRA and a LoHa adapter).
```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
# an example LoRA configuration for the first adapter
lora_config_1 = LoraConfig(task_type="CAUSAL_LM", r=16, lora_alpha=32)
model.add_adapter(lora_config_1, adapter_name="adapter_1")
```
Call [`~PeftModel.add_adapter`] again to attach a new adapter to the base model.
```py
# a second example LoRA configuration for the new adapter
lora_config_2 = LoraConfig(task_type="CAUSAL_LM", r=16, lora_alpha=32)
model.add_adapter(lora_config_2, adapter_name="adapter_2")
```
Then you can use [`~PeftModel.set_adapter`] to set the currently active adapter.
```py
model.set_adapter("adapter_1")
output = model.generate(**inputs)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
To disable the adapter, call the [disable_adapters](https://github.com/huggingface/transformers/blob/4e3490f79b40248c53ee54365a9662611e880892/src/transformers/integrations/peft.py#L313) method.
```py
model.disable_adapters()
```
The [enable_adapters](https://github.com/huggingface/transformers/blob/4e3490f79b40248c53ee54365a9662611e880892/src/transformers/integrations/peft.py#L336) method can be used to enable the adapters again.
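```py
model.enable_adapters()
```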
If you're curious, check out the [Load and train adapters with PEFT](https://huggingface.co/docs/transformers/main/peft) tutorial to learn more.
| peft/docs/source/tutorial/peft_integrations.md/0 | {
"file_path": "peft/docs/source/tutorial/peft_integrations.md",
"repo_id": "peft",
"token_count": 2255
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.distributed as dist
from datasets import load_dataset
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
from utils import DataCollator, TokenizerMetaMath
from peft import EvaConfig, LoraConfig, get_eva_state_dict, get_peft_model, initialize_lora_eva_weights
# run this script e.g. with: torchrun --nproc_per_node=4 eva_finetuning_multi_gpu.py
# config
model_name = "meta-llama/Llama-2-7b-hf"
max_seq_len = 512
rank = 16
alpha = 1
rho = 2.0
target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"]
svd_batch_size = 4 # can be different from the batch size used in finetuning
batch_size = 4
learning_rate = 5e-4
gradient_accumulation_steps = 8
num_epochs = 1
output_dir = "outputs"
bf16 = True
# Initialize distributed environment
if torch.cuda.is_available():
local_rank = int(os.environ.get("LOCAL_RANK", -1))
torch.cuda.set_device(local_rank)
dist.init_process_group("nccl")
world_size = dist.get_world_size()
else:
local_rank = -1
world_size = 1
# load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# load dataset
dataset = load_dataset("meta-math/MetaMathQA")
dataset = dataset.map(
TokenizerMetaMath(model_name),
batched=True,
remove_columns=dataset["train"].column_names,
)
dataset.set_format(type="torch")
# data collator
data_collator = DataCollator(tokenizer.eos_token_id, max_length=max_seq_len)
# Create sampler for distributed training
sampler = DistributedSampler(dataset["train"], num_replicas=world_size, rank=local_rank)
# dataloader
dataloader = DataLoader(
dataset["train"],
batch_size=svd_batch_size,
collate_fn=data_collator,
sampler=sampler,
shuffle=False,
)
sampler.set_epoch(0)
# Wrap model in DDP
model = model.to(local_rank)
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
# setup peft config
eva_config = EvaConfig(rho=rho)
peft_config = LoraConfig(
r=rank, lora_alpha=alpha, target_modules=target_modules, init_lora_weights="eva", eva_config=eva_config
)
# EVA initialization
eva_state_dict = get_eva_state_dict(model, dataloader, peft_config)
eva_state_dict = {".".join(["base_model.model"] + k.split(".")[1:]): v for k, v in eva_state_dict.items()}
# cleanup ddp
model = model.module
# initialize peft model
peft_model = get_peft_model(model, peft_config, low_cpu_mem_usage=True)
initialize_lora_eva_weights(peft_model, eva_state_dict=eva_state_dict)
# setup training arguments
training_args = TrainingArguments(
per_device_train_batch_size=batch_size,
learning_rate=learning_rate,
gradient_accumulation_steps=gradient_accumulation_steps,
num_train_epochs=num_epochs,
output_dir=output_dir,
remove_unused_columns=False,
bf16=bf16,
)
# continue with standard finetuning
trainer = Trainer(
model=peft_model,
args=training_args,
train_dataset=dataset["train"],
data_collator=data_collator,
)
trainer.train()
| peft/examples/eva_finetuning/eva_finetuning_multi_gpu.py/0 | {
"file_path": "peft/examples/eva_finetuning/eva_finetuning_multi_gpu.py",
"repo_id": "peft",
"token_count": 1356
} |
import argparse
import os
from typing import Dict
import torch
from diffusers import UNet2DConditionModel
from safetensors.torch import save_file
from transformers import CLIPTextModel
from peft import PeftModel, get_peft_model_state_dict
# Default kohya_ss LoRA replacement modules
# https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L664
LORA_PREFIX_UNET = "lora_unet"
LORA_PREFIX_TEXT_ENCODER = "lora_te"
LORA_ADAPTER_NAME = "default"
def get_module_kohya_state_dict(
module: PeftModel, prefix: str, dtype: torch.dtype, adapter_name: str = LORA_ADAPTER_NAME
) -> Dict[str, torch.Tensor]:
kohya_ss_state_dict = {}
for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items():
kohya_key = peft_key.replace("base_model.model", prefix)
kohya_key = kohya_key.replace("lora_A", "lora_down")
kohya_key = kohya_key.replace("lora_B", "lora_up")
kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2)
kohya_ss_state_dict[kohya_key] = weight.to(dtype)
# Set alpha parameter
if "lora_down" in kohya_key:
alpha_key = f"{kohya_key.split('.')[0]}.alpha"
kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype)
return kohya_ss_state_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--sd_checkpoint",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--sd_checkpoint_revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument("--peft_lora_path", default=None, type=str, required=True, help="Path to peft trained LoRA")
parser.add_argument(
"--dump_path",
default=None,
type=str,
required=True,
help="Path to the output safetensors file for use with webui.",
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
args = parser.parse_args()
# Store kohya_ss state dict
kohya_ss_state_dict = {}
dtype = torch.float16 if args.half else torch.float32
# Load Text Encoder LoRA model
text_encoder_peft_lora_path = os.path.join(args.peft_lora_path, "text_encoder")
if os.path.exists(text_encoder_peft_lora_path):
text_encoder = CLIPTextModel.from_pretrained(
args.sd_checkpoint, subfolder="text_encoder", revision=args.sd_checkpoint_revision
)
text_encoder = PeftModel.from_pretrained(
text_encoder, text_encoder_peft_lora_path, adapter_name=LORA_ADAPTER_NAME
)
kohya_ss_state_dict.update(
get_module_kohya_state_dict(text_encoder, LORA_PREFIX_TEXT_ENCODER, dtype, LORA_ADAPTER_NAME)
)
# Load UNet LoRA model
unet_peft_lora_path = os.path.join(args.peft_lora_path, "unet")
if os.path.exists(unet_peft_lora_path):
unet = UNet2DConditionModel.from_pretrained(
args.sd_checkpoint, subfolder="unet", revision=args.sd_checkpoint_revision
)
unet = PeftModel.from_pretrained(unet, unet_peft_lora_path, adapter_name=LORA_ADAPTER_NAME)
kohya_ss_state_dict.update(get_module_kohya_state_dict(unet, LORA_PREFIX_UNET, dtype, LORA_ADAPTER_NAME))
# Save state dict
save_file(
kohya_ss_state_dict,
args.dump_path,
)
| peft/examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py/0 | {
"file_path": "peft/examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py",
"repo_id": "peft",
"token_count": 1639
} |
<jupyter_start><jupyter_code>%env CUDA_VISIBLE_DEVICES=0
%env TOKENIZERS_PARALLELISM=false<jupyter_output>env: CUDA_VISIBLE_DEVICES=0
env: TOKENIZERS_PARALLELISM=false<jupyter_text>Initialize PolyModel<jupyter_code>import torch
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
default_data_collator,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
)
from datasets import load_dataset, concatenate_datasets
from peft import PolyConfig, get_peft_model, TaskType, PeftModel, PeftConfig
model_name_or_path = "google/flan-t5-xl"
r = 8 # rank of lora in poly
n_tasks = 4 # number of tasks
n_skills = 2 # number of skills (loras)
n_splits = 4 # number of heads
batch_size = 8
lr = 5e-5
num_epochs = 8
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path, trust_remote_code=True)
peft_config = PolyConfig(
task_type=TaskType.SEQ_2_SEQ_LM,
poly_type="poly",
r=r,
n_tasks=n_tasks,
n_skills=n_skills,
n_splits=n_splits,
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()<jupyter_output>trainable params: 9,441,792 || all params: 2,859,198,976 || trainable%: 0.33022507629773296<jupyter_text>Prepare datasets. For this example, we selected four `SuperGLUE` benchmark datasets: `boolq`, `multirc`, `rte`, and `wic`, each with a training set of 1,000 examples and an evaluation set of 100 examples.<jupyter_code># boolq
boolq_dataset = (
load_dataset("super_glue", "boolq")
.map(
lambda x: {
"input": f"{x['passage']}\nQuestion: {x['question']}\nA. Yes\nB. No\nAnswer:",
# 0 - False
# 1 - True
"output": ["B", "A"][int(x["label"])],
"task_name": "boolq",
}
)
.select_columns(["input", "output", "task_name"])
)
print("boolq example: ")
print(boolq_dataset["train"][0])
# multirc
multirc_dataset = (
load_dataset("super_glue", "multirc")
.map(
lambda x: {
"input": (
f"{x['paragraph']}\nQuestion: {x['question']}\nAnswer: {x['answer']}\nIs it"
" true?\nA. Yes\nB. No\nAnswer:"
),
# 0 - False
# 1 - True
"output": ["B", "A"][int(x["label"])],
"task_name": "multirc",
}
)
.select_columns(["input", "output", "task_name"])
)
print("multirc example: ")
print(multirc_dataset["train"][0])
# rte
rte_dataset = (
load_dataset("super_glue", "rte")
.map(
lambda x: {
"input": (
f"{x['premise']}\n{x['hypothesis']}\nIs the sentence below entailed by the"
" sentence above?\nA. Yes\nB. No\nAnswer:"
),
# 0 - entailment
# 1 - not_entailment
"output": ["A", "B"][int(x["label"])],
"task_name": "rte",
}
)
.select_columns(["input", "output", "task_name"])
)
print("rte example: ")
print(rte_dataset["train"][0])
# wic
wic_dataset = (
load_dataset("super_glue", "wic")
.map(
lambda x: {
"input": (
f"Sentence 1: {x['sentence1']}\nSentence 2: {x['sentence2']}\nAre '{x['word']}'"
" in the above two sentences the same?\nA. Yes\nB. No\nAnswer:"
),
# 0 - False
# 1 - True
"output": ["B", "A"][int(x["label"])],
"task_name": "wic",
}
)
.select_columns(["input", "output", "task_name"])
)
print("wic example: ")
print(wic_dataset["train"][0])
# define a task2id map
TASK2ID = {
"boolq": 0,
"multirc": 1,
"rte": 2,
"wic": 3,
}
def tokenize(examples):
inputs, targets = examples["input"], examples["output"]
features = tokenizer(inputs, max_length=512, padding="max_length", truncation=True, return_tensors="pt")
labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt")
labels = labels["input_ids"]
labels[labels == tokenizer.pad_token_id] = -100
features["labels"] = labels
features["task_ids"] = torch.tensor([[TASK2ID[t]] for t in examples["task_name"]]).long()
return features
def get_superglue_dataset(
split="train",
n_samples=500,
):
ds = concatenate_datasets(
[
boolq_dataset[split].shuffle().select(range(n_samples)),
multirc_dataset[split].shuffle().select(range(n_samples)),
rte_dataset[split].shuffle().select(range(n_samples)),
wic_dataset[split].shuffle().select(range(n_samples)),
]
)
ds = ds.map(
tokenize,
batched=True,
remove_columns=["input", "output", "task_name"],
load_from_cache_file=False,
)
return ds<jupyter_output><empty_output><jupyter_text>As a toy example, we only select 1,000 from each subdataset for training and 100 each for eval.<jupyter_code>superglue_train_dataset = get_superglue_dataset(split="train", n_samples=1000)
superglue_eval_dataset = get_superglue_dataset(split="test", n_samples=100)<jupyter_output>Map: 100%|██████████| 4000/4000 [00:02<00:00, 1365.07 examples/s]
Map: 100%|██████████| 400/400 [00:00<00:00, 548.46 examples/s]<jupyter_text>Train and evaluate<jupyter_code># training and evaluation
def compute_metrics(eval_preds):
preds, labels = eval_preds
preds = [[i for i in seq if i != -100] for seq in preds]
labels = [[i for i in seq if i != -100] for seq in labels]
preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
correct = 0
total = 0
for pred, true in zip(preds, labels):
if pred.strip() == true.strip():
correct += 1
total += 1
accuracy = correct / total
return {"accuracy": accuracy}
training_args = Seq2SeqTrainingArguments(
"output",
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
learning_rate=lr,
num_train_epochs=num_epochs,
eval_strategy="epoch",
logging_strategy="epoch",
save_strategy="no",
report_to=[],
predict_with_generate=True,
generation_max_length=2,
remove_unused_columns=False,
)
trainer = Seq2SeqTrainer(
model=model,
tokenizer=tokenizer,
args=training_args,
train_dataset=superglue_train_dataset,
eval_dataset=superglue_eval_dataset,
data_collator=default_data_collator,
compute_metrics=compute_metrics,
)
trainer.train()
# saving model
model_name_or_path = "google/flan-t5-xl"
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
model.save_pretrained(peft_model_id)
!ls -lh $peft_model_id<jupyter_output>total 37M
-rw-r--r-- 1 root root 374 12月 22 14:59 adapter_config.json
-rw-r--r-- 1 root root 37M 12月 22 14:59 adapter_model.safetensors
-rw-r--r-- 1 root root 5.0K 12月 22 14:58 README.md<jupyter_text>Load and infer<jupyter_code>device = "cuda:0" if torch.cuda.is_available() else "cpu"
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
model = model.to(device)
model = model.eval()
i = 5
inputs = tokenizer(rte_dataset["validation"]["input"][i], return_tensors="pt")
inputs["task_ids"] = torch.LongTensor([TASK2ID["rte"]])
inputs = {k: v.to(device) for k, v in inputs.items()}
print(rte_dataset["validation"]["input"][i])
print(rte_dataset["validation"]["output"][i])
print(inputs)
with torch.no_grad():
outputs = model.generate(**inputs, max_new_tokens=2)
print(outputs[0])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])<jupyter_output>In 1979, the leaders signed the Egypt-Israel peace treaty on the White House lawn. Both President Begin and Sadat received the Nobel Peace Prize for their work. The two nations have enjoyed peaceful relations to this day.
The Israel-Egypt Peace Agreement was signed in 1979.
Is the sentence below entailed by the sentence above?
A. Yes
B. No
Answer:
A
{'input_ids': tensor([[ 86, 15393, 6, 8, 2440, 3814, 8, 10438, 18, 30387,
3065, 2665, 63, 30, 8, 1945, 1384, 8652, 5, 2867,
1661, 10129, 77, 11, 18875, 144, 1204, 8, 22232, 11128,
11329, 21, 70, 161, 5, 37, 192, 9352, 43, 2994,
9257, 5836, 12, 48, 239, 5, 37, 3352, 18, 427,
122, 63, 102, 17, 11128, 7139, 47, 3814, 16, 15393,
5, 27, 7, 8, 7142, 666, 3, 295, 10990, 57,
8, 7142, 756, 58, 71, 5, 2163, 272, 5, 465,
[...] | peft/examples/poly/peft_poly_seq2seq_with_generate.ipynb/0 | {
"file_path": "peft/examples/poly/peft_poly_seq2seq_with_generate.ipynb",
"repo_id": "peft",
"token_count": 4104
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from contextlib import contextmanager
from copy import deepcopy
from functools import update_wrapper
from types import MethodType
from torch import nn
from .peft_model import PeftConfig, PeftModel
from .tuners.lora import LoraLayer
def update_forward_signature(model: PeftModel) -> None:
"""
Updates the forward signature of the PeftModel to include the parent class's signature
model (`PeftModel`): Peft model to update the forward signature
Example:
```python
>>> from transformers import WhisperForConditionalGeneration
>>> from peft import get_peft_model, LoraConfig, update_forward_signature
>>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
>>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"])
>>> peft_model = get_peft_model(model, peft_config)
>>> update_forward_signature(peft_model)
```
"""
# Only update signature when the current forward signature only has *args and **kwargs
current_signature = inspect.signature(model.forward)
if (
len(current_signature.parameters) == 2
and "args" in current_signature.parameters
and "kwargs" in current_signature.parameters
):
forward = deepcopy(model.forward.__func__)
update_wrapper(
forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__")
)
model.forward = MethodType(forward, model)
def update_generate_signature(model: PeftModel) -> None:
"""
Updates the generate signature of a PeftModel with an overridden generate method to include the parent class's signature
model (`PeftModel`): Peft model to update the generate signature
Example:
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature
>>> model_name_or_path = "bigscience/mt0-large"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
>>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
>>> peft_config = LoraConfig(
... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
... )
>>> peft_model = get_peft_model(model, peft_config)
>>> update_generate_signature(peft_model)
>>> help(peft_model.generate)
```
"""
if not hasattr(model, "generate"):
return
current_signature = inspect.signature(model.generate)
if (
len(current_signature.parameters) == 2
and "args" in current_signature.parameters
and "kwargs" in current_signature.parameters
) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters):
generate = deepcopy(model.generate.__func__)
update_wrapper(
generate,
type(model.get_base_model()).generate,
assigned=("__doc__", "__name__", "__annotations__"),
)
model.generate = MethodType(generate, model)
def update_signature(model: PeftModel, method: str = "all") -> None:
"""
Updates the signature of a PeftModel to include the parent class's signature for the forward or generate method
model (`PeftModel`): Peft model whose forward or generate signature should be updated
method (`str`): the method whose signature to update; choose one of "forward", "generate", "all"
Example:
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> from peft import get_peft_model, LoraConfig, TaskType, update_signature
>>> model_name_or_path = "bigscience/mt0-large"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
>>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
>>> peft_config = LoraConfig(
... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
... )
>>> peft_model = get_peft_model(model, peft_config)
>>> update_signature(peft_model)
>>> help(peft_model.generate)
```
"""
if method == "forward":
update_forward_signature(model)
elif method == "generate":
update_generate_signature(model)
elif method == "all":
update_forward_signature(model)
update_generate_signature(model)
else:
raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
def check_if_peft_model(model_name_or_path: str) -> bool:
"""
Check if the model is a PEFT model.
Args:
model_name_or_path (`str`):
Model id to check, can be local or on the Hugging Face Hub.
Returns:
`bool`: True if the model is a PEFT model, False otherwise.
"""
is_peft_model = True
try:
PeftConfig.from_pretrained(model_name_or_path)
except Exception:
# allow broad exceptions so that this works even if new exceptions are added on HF Hub side
is_peft_model = False
return is_peft_model
@contextmanager
def rescale_adapter_scale(model, multiplier):
"""
Context manager to temporarily rescale the scaling of the LoRA adapter in a model.
The original scaling values are restored when the context manager exits. This context manager works with the
transformers and diffusers models that have directly loaded LoRA adapters.
For LoRA, applying this context manager with multiplier in [0, 1] is strictly equivalent to applying
[wise-ft](https://arxiv.org/abs/2109.01903) (see [#1940](https://github.com/huggingface/peft/issues/1940) for
details). It can improve the performance of the model if there is a distribution shift between the training data
used for fine-tuning and the test data used during inference.
Warning: It has been reported that when using Apple's MPS backend for PyTorch, it is necessary to add a short sleep
time after exiting the context before the scales are fully restored.
Args:
model: The model containing `LoraLayer` modules whose scaling is to be adjusted.
multiplier (float or int):
The multiplier that rescales the `scaling` attribute. Must be of type float or int.
Raises:
ValueError: If the model does not contain any `LoraLayer`
instances, indicating that the model does not support scaling.
Example:
```python
>>> model = ModelWithLoraLayer()
>>> multiplier = 0.5
>>> with rescale_adapter_scale(model, multiplier):
... outputs = model(**inputs) # Perform operations with the scaled model
>>> outputs = model(**inputs) # The original scaling values are restored here
```
"""
# check if multiplier has a valid data type
if not isinstance(multiplier, (float, int)):
raise TypeError(f"Argument multiplier should be of type float, got {type(multiplier)}")
# iterate on the model's modules and grab the original scaling attribute
# from the lora layers if present
original_scaling = {}
for module in model.modules():
if isinstance(module, LoraLayer):
original_scaling[module] = module.scaling.copy()
module.scaling = {k: v * multiplier for k, v in module.scaling.items()}
# check whether scaling is prohibited on model
# the original scaling dictionary should be empty
# if there were no lora layers
if not original_scaling:
raise ValueError("scaling is only supported for models with `LoraLayer`s")
try:
yield
finally:
# restore original scaling values after exiting the context
for module, scaling in original_scaling.items():
module.scaling = scaling
@contextmanager
def disable_input_dtype_casting(model: nn.Module, active: bool = True):
"""
Context manager disables input dtype casting to the dtype of the weight.
Currently specifically works for LoRA.
Parameters:
model (nn.Module):
The model containing PEFT modules whose input dtype casting is to be adjusted.
active (bool):
Whether the context manager is active (default) or inactive.
"""
# Additional info: Normally, the dtype of the weight and input need to match, which is why the dtype is cast.
# However, in certain circumstances, this is handled by forward hooks, e.g. when using layerwise casting in
# diffusers. In that case, PEFT casting the dtype interferes with the layerwise casting, which is why the option to
# disable it is given.
if not active:
yield
return
original_values = {}
for name, module in model.named_modules():
if not isinstance(module, LoraLayer):
continue
original_values[name] = module.cast_input_dtype_enabled
module.cast_input_dtype_enabled = False
try:
yield
finally:
for name, module in model.named_modules():
if not isinstance(module, LoraLayer):
continue
if name in original_values:
module.cast_input_dtype_enabled = original_values[name]
| peft/src/peft/helpers.py/0 | {
"file_path": "peft/src/peft/helpers.py",
"repo_id": "peft",
"token_count": 3455
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import torch
from transformers.pytorch_utils import Conv1D
from peft.import_utils import is_bnb_4bit_available, is_bnb_available, is_gptqmodel_available
from peft.tuners.lora import LoraConfig, LoraModel
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import (
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
_freeze_adapter,
_get_submodules,
get_auto_gptq_quant_linear,
get_gptqmodel_quant_linear,
get_quantization_config,
)
from peft.utils.integrations import gather_params_ctx
from .gptq import SVDQuantLinear
from .layer import AdaLoraLayer, RankAllocator, SVDLinear
class AdaLoraModel(LoraModel):
"""
Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
https://openreview.net/forum?id=lq62uWRJjiY
Args:
model ([`transformers.PreTrainedModel`]): The model to be adapted.
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
Create empty adapter weights on meta device. Useful to speed up the loading process.
Returns:
`torch.nn.Module`: The AdaLora model.
Example::
>>> from transformers import AutoModelForSeq2SeqLM >>> from peft import LoraConfig, AdaLoraModel, AdaLoraConfig
>>> config = AdaLoraConfig(
peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", init_r=12, lora_alpha=32, target_modules=["q", "v"],
lora_dropout=0.01,
)
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(model, config, "default")
**Attributes**:
- **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
"""
# Note: don't redefine prefix here, it should be inherited from LoraModel
def __init__(self, model, config, adapter_name):
super().__init__(model, config, adapter_name)
traininable_mode_counter = 0
for config in self.peft_config.values():
if not config.inference_mode:
traininable_mode_counter += 1
if traininable_mode_counter > 1:
raise ValueError(
"AdaLoraModel supports only 1 trainable adapter. "
"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
)
if self.peft_config[adapter_name].inference_mode:
_freeze_adapter(self.model, adapter_name)
else:
self.trainable_adapter_name = adapter_name
self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
def _check_new_adapter_config(self, config: LoraConfig) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
super()._check_new_adapter_config(config)
traininable_mode_counter = 0
for config_ in self.peft_config.values():
if not config_.inference_mode:
traininable_mode_counter += 1
if traininable_mode_counter > 1:
raise ValueError(
f"{self.__class__.__name__} supports only 1 trainable adapter. "
"When using multiple adapters, set inference_mode to True for all adapters except the one "
"you want to train."
)
def _create_and_replace(
self,
lora_config,
adapter_name,
target,
target_name,
parent,
current_key,
):
kwargs = {
"r": lora_config.init_r,
"lora_alpha": lora_config.lora_alpha,
"lora_dropout": lora_config.lora_dropout,
"fan_in_fan_out": lora_config.fan_in_fan_out,
"init_lora_weights": lora_config.init_lora_weights,
"loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
"loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
}
if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available():
raise ImportError(
"To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. "
"You can install it with `pip install bitsandbytes`."
)
quantization_config = get_quantization_config(self.model, method="gptq")
if quantization_config is not None:
kwargs["gptq_quantization_config"] = quantization_config
# If it is not an AdaLoraLayer, create a new module, else update it with new adapters
if not isinstance(target, AdaLoraLayer):
device_map = self.model.hf_device_map if hasattr(self.model, "hf_device_map") else None
new_module = self._create_new_module(lora_config, adapter_name, target, device_map=device_map, **kwargs)
if adapter_name not in self.active_adapters:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
else:
target.update_layer(
adapter_name,
lora_config.init_r,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
@staticmethod
def _create_new_module(lora_config, adapter_name, target, device_map=None, **kwargs):
# avoid eager bnb import
if is_bnb_available():
import bitsandbytes as bnb
from .bnb import SVDLinear8bitLt
if is_bnb_4bit_available():
from .bnb import SVDLinear4bit
gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
if is_gptqmodel_available():
QuantLinear = get_gptqmodel_quant_linear(gptq_quantization_config, device_map=device_map)
else:
QuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
kwargs.update(
{
"has_fp16_weights": target_base_layer.state.has_fp16_weights,
"threshold": target_base_layer.state.threshold,
"index": target_base_layer.index,
}
)
new_module = SVDLinear8bitLt(target, adapter_name, **kwargs)
elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update(
{
"compute_dtype": target_base_layer.compute_dtype,
"compress_statistics": target_base_layer.weight.compress_statistics,
"quant_type": target_base_layer.weight.quant_type,
}
)
new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs)
elif QuantLinear is not None and isinstance(target, QuantLinear):
new_module = SVDQuantLinear(target, adapter_name, **kwargs)
else:
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. "
"Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. "
f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
)
new_module = SVDLinear(target, adapter_name, **kwargs)
return new_module
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
model_config["model_type"]
]
return peft_config
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor):
# Calculate the orthogonal regularization
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
if orth_reg_weight <= 0:
raise ValueError("orth_reg_weight should be greater than 0. ")
regu_loss = 0
num_param = 0
for n, p in self.model.named_parameters():
if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
if p.shape == torch.Size([0]):
with gather_params_ctx(p, fwd_module=self):
para_cov = p @ p.T if "lora_A" in n else p.T @ p
else:
para_cov = p @ p.T if "lora_A" in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) # noqa: E741
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p="fro")
if num_param > 0:
regu_loss = regu_loss / num_param
else:
regu_loss = 0
outputs.loss += orth_reg_weight * regu_loss
return outputs
def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
lora_config = self.peft_config[adapter_name]
for name, rank_idx in rank_pattern.items():
if isinstance(rank_idx, list):
rank = sum(rank_idx)
elif isinstance(rank_idx, torch.Tensor):
rank_idx = rank_idx.view(-1)
rank = rank_idx.sum().item()
else:
raise ValueError("Unexpected type of rank_idx")
key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
_, target, _ = _get_submodules(self.model, key)
lora_E_weights = target.lora_E[adapter_name][rank_idx]
lora_A_weights = target.lora_A[adapter_name][rank_idx]
lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
ranknum = target.ranknum[adapter_name]
target.update_layer(
adapter_name,
rank,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
with torch.no_grad():
if rank > 0:
target.lora_E[adapter_name].copy_(lora_E_weights)
target.lora_A[adapter_name].copy_(lora_A_weights)
target.lora_B[adapter_name].copy_(lora_B_weights)
# The scaling is exactly as the previous
target.ranknum[adapter_name].copy_(ranknum)
def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
for name, rank_idx in rank_pattern.items():
rank = sum(rank_idx)
prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
for layer in ["lora_E", "lora_A", "lora_B"]:
key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
if layer != "lora_B":
state_dict[key] = (
state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
)
else:
state_dict[key] = (
state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
)
return state_dict
def update_and_allocate(self, global_step):
"""
This method updates Adalora budget and mask.
This should be called in every training step after `loss.backward()` and before `zero_grad()`.
`tinit`, `tfinal` and `deltaT` are handled with in the method.
Args:
global_step (`int`): The current training step, it is used to calculate adalora budget.
Example:
```python
>>> loss = model(**input).loss
>>> loss.backward()
>>> optimizer.step()
>>> model.base_model.update_and_allocate(i_step)
>>> optimizer.zero_grad()
```
"""
lora_config = self.peft_config[self.trainable_adapter_name]
# Update the importance score and allocate the budget
if global_step < lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
# Finalize the budget allocation
elif global_step == lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
# for some reason, this freezes the trainable parameters and nothing gets updated
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
# Currently using inefficient way to mask the unimportant weights using the rank pattern
# due to problem mentioned above
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
# Pass the function and do forward propagation
else:
return None
def add_weighted_adapter(self, *args, **kwargs):
"""This method is not supported for AdaLoRA, use LoRA instead."""
raise TypeError(f"{self.__class__.__name__} does not support add_weighted_adapter method.")
| peft/src/peft/tuners/adalora/model.py/0 | {
"file_path": "peft/src/peft/tuners/adalora/model.py",
"repo_id": "peft",
"token_count": 7652
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import re
import warnings
from dataclasses import asdict, replace
from enum import Enum
from typing import Optional
import torch
from torch import nn
from transformers.pytorch_utils import Conv1D
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import (
TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
ModulesToSaveWrapper,
_freeze_adapter,
_get_submodules,
)
from .layer import Conv2d, Conv3d, IA3Layer, Linear
class IA3Model(BaseTuner):
"""
Creates a Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained
transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638
Args:
model ([`~transformers.PreTrainedModel`]): The model to be adapted.
config ([`IA3Config`]): The configuration of the (IA)^3 model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
Create empty adapter weights on meta device. Useful to speed up the loading process.
Returns:
`torch.nn.Module`: The (IA)^3 model.
Example:
```py
>>> from transformers import AutoModelForSeq2SeqLM
>>> from peft import IA3Model, IA3Config
>>> config = IA3Config(
... peft_type="IA3",
... task_type="SEQ_2_SEQ_LM",
... target_modules=["k", "v", "w0"],
... feedforward_modules=["w0"],
... )
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> ia3_model = IA3Model(model, config, "default")
```
**Attributes**:
- **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`ia3Config`]): The configuration of the (IA)^3 model.
"""
prefix: str = "ia3_"
def __init__(self, model, config, adapter_name, low_cpu_mem_usage: bool = False):
super().__init__(model, config, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
@staticmethod
def _create_new_module(ia3_config, adapter_name, target, **kwargs):
# avoid eager bnb import
if is_bnb_available():
import bitsandbytes as bnb
from .bnb import Linear8bitLt
if is_bnb_4bit_available():
from .bnb import Linear4bit
loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
is_feedforward = kwargs.pop("is_feedforward", False)
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
eightbit_kwargs = kwargs.copy()
eightbit_kwargs.update(
{
"has_fp16_weights": target_base_layer.state.has_fp16_weights,
"threshold": target_base_layer.state.threshold,
"index": target_base_layer.index,
}
)
new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs)
elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update(
{
"compute_dtype": target_base_layer.compute_dtype,
"compress_statistics": target_base_layer.weight.compress_statistics,
"quant_type": target_base_layer.weight.quant_type,
}
)
new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs)
elif isinstance(target, torch.nn.Conv2d):
new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
elif isinstance(target, torch.nn.Conv3d):
new_module = Conv3d(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
elif isinstance(target_base_layer, torch.nn.Linear):
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = False
new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
elif isinstance(target_base_layer, Conv1D):
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True
new_module = Linear(
target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs
)
else:
raise ValueError(
f"Target module {target} is not supported. "
f"Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported."
)
return new_module
@staticmethod
def _check_target_module_exists(ia3_config, key):
return check_target_module_exists(ia3_config, key)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for n, p in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
def _create_and_replace(
self,
ia3_config,
adapter_name,
target,
target_name,
parent,
current_key,
):
# check if target module is in feedforward_modules
is_feedforward = self._check_target_module_feedforward(ia3_config, current_key)
kwargs = {
"fan_in_fan_out": ia3_config.fan_in_fan_out,
"init_ia3_weights": ia3_config.init_ia3_weights,
"is_feedforward": is_feedforward,
"loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
"loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
}
if isinstance(target, IA3Layer):
target.update_layer(
adapter_name,
ia3_config.init_ia3_weights,
)
else:
new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _check_target_module_feedforward(ia3_config, key) -> bool:
"""
A helper private method that checks if the target module `key` matches with a feedforward module specified in
`ia3_config`
"""
if isinstance(ia3_config.feedforward_modules, str):
is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key))
else:
is_feedforward = any(key.endswith(target_key) for target_key in ia3_config.feedforward_modules)
return is_feedforward
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
# child layer wraps the original module, unpack it
if hasattr(child, "base_layer"):
child = child.base_layer
# layers with base_layer don't need the weight to be copied, as they have a reference already
if not hasattr(new_module, "base_layer"):
new_module.weight = child.weight
if hasattr(child, "bias"):
new_module.bias = child.bias
if getattr(child, "state", None) is not None:
if hasattr(new_module, "base_layer"):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
meta = torch.device("meta")
# dispatch to correct device
for name, module in new_module.named_modules():
if self.prefix in name:
if not any(p.device == meta for p in module.parameters()):
module.to(child.weight.device)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool = False):
config_dict = {}
for key, value in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
if inference:
config["inference_mode"] = True
config_dict[key] = config
        return config_dict
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (IA3Layer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self) -> None:
"""Enable all adapters.
Call this if you have previously disabled all adapters and want to re-enable them.
"""
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
"""Disable all adapters.
When disabling all adapters, the model output corresponds to the output of the base model.
"""
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: str | list[str]) -> None:
"""Set the active adapter(s).
Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
not desired, use the following code.
```py
>>> for name, param in model_peft.named_parameters():
... if ...: # some check on name (ex. if 'lora' in name)
... param.requires_grad = False
```
Args:
adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
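        Example (adapter names below are illustrative):
        ```py
        >>> model.set_adapter("adapter_2")
        >>> model.set_adapter(["adapter_1", "adapter_2"])  # activate several adapters at once
        ```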
"""
for module in self.model.modules():
if isinstance(module, IA3Layer):
if module.merged:
warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = set(
TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[model_config["model_type"]]
)
if peft_config.feedforward_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING:
raise ValueError("Please specify `feedforward_modules` in `peft_config`")
peft_config.feedforward_modules = set(
TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[model_config["model_type"]]
)
return peft_config
def _unload_and_optionally_merge(
self, merge: bool = True, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
):
r"""
        This method merges the IA³ layers into the base model. This is needed if someone wants to use the base model
as a standalone model.
Args:
safe_merge (`bool`, `optional`, defaults to `False`):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
if getattr(self.model, "is_loaded_in_8bit", False):
raise ValueError("Cannot merge ia3 layers when the model is loaded in 8-bit mode")
if getattr(self.model, "is_loaded_in_4bit", False):
raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode")
self._unloading_checks(adapter_names)
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
for key in key_list:
try:
parent, target, target_name = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, "base_layer"):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
# save any additional trainable modules part of `modules_to_save`
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, "base_layer"):
# check if the module is itself a tuner layer
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def merge_and_unload(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> torch.nn.Module:
r"""
This method merges the IA³ layers into the base model. This is needed if someone wants to use the base model as
a standalone model.
Args:
safe_merge (`bool`):
                Whether to activate the safe merging check, which verifies that there are no NaNs in the adapter
                weights before merging.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
Example:
```py
>>> from transformers import AutoModelForCausalLM
>>> from peft import PeftModel
>>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
>>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
>>> model = PeftModel.from_pretrained(base_model, peft_model_id)
>>> merged_model = model.merge_and_unload()
```
"""
return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> torch.nn.Module:
"""
Gets back the base model by removing all the IA³ modules without merging. This gives back the original base
model.
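        Example (assumes `model` is a PEFT model wrapping a base model with IA³):
        ```py
        >>> base_model = model.unload()
        ```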
"""
return self._unload_and_optionally_merge(merge=False)
def delete_adapter(self, adapter_name: str) -> None:
"""
Deletes an existing adapter.
Args:
adapter_name (str): Name of the adapter to be deleted.
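        Example (the adapter name is illustrative):
        ```py
        >>> model.delete_adapter("adapter_1")
        ```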
"""
if adapter_name not in self.peft_config:
raise ValueError(f"Adapter {adapter_name} does not exist")
del self.peft_config[adapter_name]
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
_, target, _ = _get_submodules(self.model, key)
if isinstance(target, IA3Layer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def _check_add_weighted_adapter(self, adapters: list[str]) -> tuple[str, str]:
"""
Helper function to check if the arguments to add_weighted_adapter are valid and compatible with the underlying
model.
"""
# Validate existence of adapters
for adapter in adapters:
if adapter not in self.peft_config:
raise ValueError(f"Adapter {adapter} does not exist")
# Check for conflicting modules_to_save
modules_to_save_wrappers = [module for module in self.modules() if isinstance(module, ModulesToSaveWrapper)]
if any(
sum(adapter in wrapper.modules_to_save for adapter in adapters) > 1 for wrapper in modules_to_save_wrappers
):
raise ValueError("Cannot add weighted adapters targeting the same module with modules_to_save.")
# Ensure all adapters have compatible target and feedforward module types
target_module_types = {type(self.peft_config[adapter].target_modules) for adapter in adapters}
feedforward_module_types = {type(self.peft_config[adapter].feedforward_modules) for adapter in adapters}
if len(target_module_types) > 1 or len(feedforward_module_types) > 1:
raise ValueError("All adapter configs should have the same type for target and feedforward modules.")
# Combine target and feedforward modules
if str in target_module_types:
new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters)
else:
new_target_modules = set.union(*(self.peft_config[adapter].target_modules for adapter in adapters))
if str in feedforward_module_types:
new_feedforward_modules = "|".join(
f"({self.peft_config[adapter].feedforward_modules})" for adapter in adapters
)
else:
new_feedforward_modules = set.union(
*(self.peft_config[adapter].feedforward_modules for adapter in adapters)
)
return new_target_modules, new_feedforward_modules
def add_weighted_adapter(
self,
adapters: list[str],
weights: list[float],
adapter_name: str,
) -> None:
"""
This method adds a new adapter by merging the given adapters with the given weights.
Args:
adapters (`list`):
List of adapter names to be merged.
weights (`list`):
List of weights for each adapter.
adapter_name (`str`):
Name of the new adapter.
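        Example (adapter names and weights are illustrative):
        ```py
        >>> model.add_weighted_adapter(
        ...     adapters=["adapter_1", "adapter_2"], weights=[0.7, 0.3], adapter_name="merged"
        ... )
        >>> model.set_adapter("merged")
        ```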
"""
if adapter_name in list(self.peft_config.keys()):
return
new_target_modules, new_feedforward_modules = self._check_add_weighted_adapter(
adapters=adapters,
)
self.peft_config[adapter_name] = replace(
self.peft_config[adapters[0]],
target_modules=new_target_modules,
feedforward_modules=new_feedforward_modules,
)
self.inject_adapter(self.model, adapter_name)
# Do we really need that?
_freeze_adapter(self.model, adapter_name)
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
for key in key_list:
_, target, _ = _get_submodules(self.model, key)
if isinstance(target, IA3Layer):
if adapter_name in target.ia3_l:
target_ia3_l = target.ia3_l[adapter_name]
else:
continue
target_ia3_l.data = target_ia3_l.data.zero_()
for adapter, weight in zip(adapters, weights):
if adapter in target.ia3_l:
current_adapter_ia3_l = target.ia3_l[adapter]
else:
continue
target_ia3_l.data += current_adapter_ia3_l.data * weight
| peft/src/peft/tuners/ia3/model.py/0 | {
"file_path": "peft/src/peft/tuners/ia3/model.py",
"repo_id": "peft",
"token_count": 9407
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Any, Optional
import bitsandbytes as bnb
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.integrations import dequantize_bnb_weight
from peft.utils.other import transpose
from .layer import LoraLayer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, LoraLayer):
# Lora implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
use_rslora: bool = False,
use_dora: bool = False,
lora_bias: bool = False,
**kwargs,
) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
lora_bias=lora_bias,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter not in self.lora_A.keys():
continue
warnings.warn(
"Merge lora module to 8-bit linear may get different generations due to rounding errors."
)
lora_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
                # Dequantize by multiplying the int8 weight with an identity matrix, because bitsandbytes does not
                # support int8 dequantization directly
output = dequantize_bnb_weight(weight, state=state)
if not self.use_dora[active_adapter]:
w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data
else:
# handle dora
# since output already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(output, lora_data, scaling=1)
.detach()
)
# We need to cache weight_norm because it has to be based on the original weights. We
                    # cannot calculate it on the fly based on the merged weights when unmerging because it's a
# different value
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = dora_factor.view(-1, 1) * (output + lora_data)
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
if self.lora_bias[active_adapter]:
bias_data = self.get_base_layer().bias.data + self.lora_B[active_adapter].bias
                    if safe_merge and not torch.isfinite(bias_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().bias.data = bias_data
state.reset_grads()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.lora_A.keys():
continue
warnings.warn(
"Unmerge lora module to 8-bit linear may get different generations due to rounding errors."
)
lora_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state=state)
if not self.use_dora[active_adapter]:
w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data
else:
weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = output.data / dora_factor.view(-1, 1) - lora_data
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
if self.lora_bias[active_adapter]:
self.get_base_layer().bias.data -= self.lora_B[active_adapter].bias
state.reset_grads()
def get_delta_weight(self, adapter):
return (
transpose(
self.lora_B[adapter].weight @ self.lora_A[adapter].weight,
False,
)
* self.scaling[adapter]
)
def _mixed_batch_forward(
self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
) -> torch.Tensor:
# This is a special method that handles the case when users pass the argument `adapter_names`. This is an
# extra argument that allows mixing different adapters in the same batch at inference time.
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
for i, active_adapter in enumerate(unique_adapters):
if active_adapter == "__base__":
continue
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, lora_A.weight.dtype)
# getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
# layer output
sub_batch = x[sub_batch_indices_list[i]]
output = lora_B(lora_A(dropout(sub_batch))) * scaling
if requires_conversion:
output = output.to(expected_dtype)
result[sub_batch_indices_list[i]] += output
return result
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
output = lora_B(lora_A(dropout(x))) * scaling
else:
if isinstance(dropout, torch.nn.Identity) or not self.training:
base_result = result
else:
x = dropout(x)
base_result = None
output = self.lora_magnitude_vector[active_adapter](
x,
lora_A=lora_A,
lora_B=lora_B,
scaling=scaling,
base_layer=self.get_base_layer(),
base_result=base_result,
)
if requires_conversion:
output = output.to(expected_dtype)
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
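    # Dispatcher used when creating LoRA modules: returns a Linear8bitLt wrapper if the target's base layer is a
    # bnb.nn.Linear8bitLt loaded in 8-bit, otherwise None so that other dispatchers can handle the target.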
def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_8bit = kwargs.get("loaded_in_8bit", False)
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
eightbit_kwargs = kwargs.copy()
eightbit_kwargs.update(
{
"has_fp16_weights": target.state.has_fp16_weights,
"threshold": target.state.threshold,
"index": target.index,
}
)
new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs)
return new_module
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, LoraLayer):
# Lora implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
use_rslora: bool = False,
use_dora: bool = False,
lora_bias: bool = False,
**kwargs,
) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
lora_bias=lora_bias,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter not in self.lora_A.keys():
continue
warnings.warn(
"Merge lora module to 4-bit linear may get different generations due to rounding errors."
)
# Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930
weight = self.get_base_layer().weight
kwargs = weight.__dict__
lora_data = self.get_delta_weight(active_adapter)
output = dequantize_bnb_weight(weight, state=weight.quant_state)
if not self.use_dora[active_adapter]:
w_data = output + lora_data
else:
# handle dora
# since output already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(output, lora_data, scaling=1)
.detach()
)
# We need to cache weight_norm because it has to be based on the original weights. We
                    # cannot calculate it on the fly based on the merged weights when unmerging because it's a
# different value
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = dora_factor.view(-1, 1) * (output + lora_data)
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
if "bnb_quantized" in kwargs:
kwargs["bnb_quantized"] = False
kwargs["requires_grad"] = False
kwargs.pop("data", None)
# torch.compile can introduce attributes preceded by '_', remove them
kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device)
if self.lora_bias[active_adapter]:
bias_data = self.get_base_layer().bias.data + self.lora_B[active_adapter].bias
                    if safe_merge and not torch.isfinite(bias_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().bias.data = bias_data
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.lora_A.keys():
continue
warnings.warn(
"Unmerge lora module to 4-bit linear may get different generations due to rounding errors."
)
lora_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
output = dequantize_bnb_weight(weight, state=weight.quant_state)
if not self.use_dora[active_adapter]:
w_data = output - lora_data
else:
weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = output.data / dora_factor.view(-1, 1) - lora_data
if "bnb_quantized" in kwargs:
kwargs["bnb_quantized"] = False
kwargs["requires_grad"] = False
kwargs.pop("data", None)
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device)
if self.lora_bias[active_adapter]:
self.get_base_layer().bias.data -= self.lora_B[active_adapter].bias
def get_delta_weight(self, adapter):
return (
transpose(
self.lora_B[adapter].weight @ self.lora_A[adapter].weight,
False,
)
* self.scaling[adapter]
)
def _mixed_batch_forward(
self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
) -> torch.Tensor:
# This is a special method that handles the case when users pass the argument `adapter_names`. This is an
# extra argument that allows mixing different adapters in the same batch at inference time.
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
for i, active_adapter in enumerate(unique_adapters):
if active_adapter == "__base__":
continue
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, lora_A.weight.dtype)
# getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
# layer output
sub_batch = x[sub_batch_indices_list[i]]
output = lora_B(lora_A(dropout(sub_batch))) * scaling
if requires_conversion:
output = output.to(expected_dtype)
result[sub_batch_indices_list[i]] += output
return result
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
# As per Tim Dettmers, for 4bit, we need to defensively clone here.
# The reason is that in some cases, an error can occur that backprop
# does not work on a manipulated view. This issue may be solved with
# newer PyTorch versions but this would need extensive testing to be
# sure.
result = result.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
output = lora_B(lora_A(dropout(x))) * scaling
else:
if isinstance(dropout, torch.nn.Identity) or not self.training:
base_result = result
else:
x = dropout(x)
base_result = None
output = self.lora_magnitude_vector[active_adapter](
x,
lora_A=lora_A,
lora_B=lora_B,
scaling=scaling,
base_layer=self.get_base_layer(),
base_result=base_result,
)
if requires_conversion:
output = output.to(expected_dtype)
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
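    # Dispatcher used when creating LoRA modules: returns a Linear4bit wrapper if the target's base layer is a
    # bnb.nn.Linear4bit loaded in 4-bit, otherwise None so that other dispatchers can handle the target.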
def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_4bit = kwargs.get("loaded_in_4bit", False)
if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update(
{
"compute_dtype": target_base_layer.compute_dtype,
"compress_statistics": target_base_layer.weight.compress_statistics,
"quant_type": target_base_layer.weight.quant_type,
}
)
new_module = Linear4bit(target, adapter_name, **fourbit_kwargs)
return new_module
| peft/src/peft/tuners/lora/bnb.py/0 | {
"file_path": "peft/src/peft/tuners/lora/bnb.py",
"repo_id": "peft",
"token_count": 13196
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.tuners.prompt_tuning import PromptTuningConfig
from peft.utils import PeftType
class MultitaskPromptTuningInit(str, enum.Enum):
# initialize prompt with text
TEXT = "TEXT"
# initialize prompt with random matrix
RANDOM = "RANDOM"
# average the prefix and column matrices obtained during source training
AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
# pick prefix and column matrices for a particular task obtained during source training
EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
# only use the prompt embeddings trained during source training
ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"
@dataclass
class MultitaskPromptTuningConfig(PromptTuningConfig):
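    """
    Configuration for multitask prompt tuning, extending [`PromptTuningConfig`] with options to initialize the
    prompt from matrices learned during source training.
    Example (a minimal sketch; the task type and sizes are illustrative):
    ```py
    >>> from peft import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
    >>> config = MultitaskPromptTuningConfig(
    ...     task_type="SEQ_2_SEQ_LM",
    ...     prompt_tuning_init=MultitaskPromptTuningInit.RANDOM,
    ...     num_virtual_tokens=20,
    ...     num_tasks=4,
    ...     num_ranks=1,
    ... )
    ```
    """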
prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
default=MultitaskPromptTuningInit.RANDOM,
metadata={
"help": (
"How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
"EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
),
},
)
prompt_tuning_init_state_dict_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The path of source state dict. This is required when training the downstream target prompt from "
"the pretrained source prompt"
),
},
)
prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"})
num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
| peft/src/peft/tuners/multitask_prompt_tuning/config.py/0 | {
"file_path": "peft/src/peft/tuners/multitask_prompt_tuning/config.py",
"repo_id": "peft",
"token_count": 899
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py
# with some refactor
import torch
class PrefixEncoder(torch.nn.Module):
r"""
The `torch.nn` model to encode the prefix.
Args:
config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.
Example:
```py
>>> from peft import PrefixEncoder, PrefixTuningConfig
>>> config = PrefixTuningConfig(
... peft_type="PREFIX_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... encoder_hidden_size=768,
... )
>>> prefix_encoder = PrefixEncoder(config)
```
**Attributes**:
- **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.
- **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if
`prefix_projection` is `True`.
- **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.
Input shape: (`batch_size`, `num_virtual_tokens`)
Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)
"""
def __init__(self, config):
super().__init__()
self.prefix_projection = config.prefix_projection
token_dim = config.token_dim
num_layers = config.num_layers
encoder_hidden_size = config.encoder_hidden_size
num_virtual_tokens = config.num_virtual_tokens
if self.prefix_projection and not config.inference_mode:
# Use a two-layer MLP to encode the prefix
self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
self.transform = torch.nn.Sequential(
torch.nn.Linear(token_dim, encoder_hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),
)
else:
self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)
def forward(self, prefix: torch.Tensor):
if self.prefix_projection:
prefix_tokens = self.embedding(prefix)
past_key_values = self.transform(prefix_tokens)
else:
past_key_values = self.embedding(prefix)
return past_key_values
| peft/src/peft/tuners/prefix_tuning/model.py/0 | {
"file_path": "peft/src/peft/tuners/prefix_tuning/model.py",
"repo_id": "peft",
"token_count": 1228
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from dataclasses import dataclass
from typing import Optional
from peft.config import PeftConfig
from peft.utils.peft_types import PeftType
@dataclass
class XLoraConfig(PeftConfig):
r"""
    This is the configuration class to store the configuration of an `XLoraModel`. When the config is reloaded, the
    paths of the `adapters` field are disregarded in favor of the saved adapters. As such, only the keys matter during
loading.
Args:
hidden_size (`int`):
Hidden size of the base model.
adapters (`dict`):
Mapping of adapter names to the LoRA adapter id, as per PeftModel.load_adapter. *They will be automatically
loaded*, to use as LoRA experts. When using from_pretrained, pass the new adapters dict as a keyword
argument.
enable_softmax (`bool`, *optional*, defaults to `True`):
Enable softmax application for the X-LoRA classifier.
enable_softmax_topk (`bool`, *optional*, defaults to `False`):
Enable softmax application for the top-k LoRA adapters. Mutually exclusive to `enable_softmax` and must
only be set if `top_k_lora` is.
softmax_temperature (`float`, *optional*, defaults to 1.0):
            Softmax temperature; lower values yield sharper predictions.
layerwise_scalings (`bool`, *optional*, defaults to `False`):
            If True, generate scalings for each LoRA adapter (each layer). If this is False, then the same scalings
            are broadcast to every layer.
top_k_lora (`int`, *optional*, defaults to None):
Sparsely select the top_k LoRA experts instead of the default dense method.
xlora_depth (`int`, *optional*, defaults to 1):
Depth of the X-LoRA classifier.
xlora_size (`int`, *optional*, defaults to 2048):
Hidden size of the X-LoRA classifier, irrelevant if `xlora_depth=1`.
xlora_dropout_p (`float`, *optional*, defaults to 0.2):
Dropout probability of the X-LoRA classifier, irrelevant if `xlora_depth=1`.
use_trainable_adapters (`bool`, *optional*, defaults to False):
Make the adapters trainable.
scaling_pass_value (`float`, *optional*, defaults to 0):
Scaling pass value.
global_scaling_weight (`float`, *optional*, defaults to 1):
Weight to multiply output of each LoRA adapter by.
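    Example (a minimal sketch; the adapter paths are placeholders):
    ```py
    >>> from peft import XLoraConfig
    >>> config = XLoraConfig(
    ...     task_type="CAUSAL_LM",
    ...     hidden_size=4096,
    ...     adapters={"0": "./path/to/adapter_0", "1": "./path/to/adapter_1"},
    ...     xlora_depth=4,
    ... )
    ```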
"""
hidden_size: int = None # type: ignore
adapters: dict[str, str] = None # type: ignore
enable_softmax: bool = True
enable_softmax_topk: bool = False
layerwise_scalings: bool = False
xlora_depth: int = 1
xlora_size: int = 2048
xlora_dropout_p: float = 0.2
use_trainable_adapters: bool = False
softmax_temperature: float = 1.0
top_k_lora: Optional[int] = None
scaling_pass_value: float = 0.0
global_scaling_weight: float = 1.0
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.XLORA
if self.hidden_size is None:
            warnings.warn(
                "No value was provided for `hidden_size`. This will be set to 4096 by default; please ensure that this is correct."
            )
self.hidden_size = 4096
if self.adapters is None:
            warnings.warn(
                "No value was provided for `adapters`. This will be set to empty; please ensure that this is correct."
            )
self.adapters = {}
if self.enable_softmax_topk and self.top_k_lora is None:
            warnings.warn("`enable_softmax_topk` is enabled but `top_k_lora` is not set")
if self.enable_softmax_topk and self.enable_softmax:
warnings.warn(
"`enable_softmax_topk` and `enable_softmax` are both enabled. This will result in worse performance."
)
if self.top_k_lora is not None and self.top_k_lora < 1:
warnings.warn("`top_k_lora` value must be at least 1.")
| peft/src/peft/tuners/xlora/config.py/0 | {
"file_path": "peft/src/peft/tuners/xlora/config.py",
"repo_id": "peft",
"token_count": 1765
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Regression testing: check that checkpoints from previous PEFT versions still return the same values.
#
# For normal regression testing, just run:
#
# `pytest tests/regression/test_regression.py -s --regression`
#
# Add `-s` to show potentially useful debugging information. `--regression` is a custom marker that is required for
# regression tests not to be skipped.
#
# To create new regression tests, run:
# `HF_TOKEN=<token> REGRESSION_CREATION_MODE=True pytest tests/regression/test_regression.py -s --regression`
#
# This will *fail* if:
#
# 1. the git worktree is dirty
# 2. the git commit is not tagged
#
# Note: A Hugging Face Hub token is required to upload the regression artifacts to our
# https://huggingface.co/peft-internal-testing repo. This can be done by anyone with write access to the repo but
# apparently it is not possible to create a technical token with write access.
#
# This is important to ensure that the regression artifacts correspond to a specific released version of PEFT.
# Therefore, it is recommended to checkout the tag before running the regression tests, e.g. by running:
#
# `git checkout v0.1.0`
#
# To override these checks, run:
# ``HF_TOKEN=<token> REGRESSION_CREATION_MODE=True REGRESSION_FORCE_MODE=True pytest tests/regression/test_regression.py -s --regression`
#
# In REGRESSION_CREATION_MODE, one directory will be created in tests/regression/<TEST_NAME>/<PEFT_VERSION>/ for each
# test. This will contain the saved adapter, as well as the output of the test of the model for that version.
#
# In normal testing mode, the saved adapter and output for each version found in the directory
# tests/regression/<TEST_NAME>/ will be loaded and compared to the current output.
#
# When implementing new tests, check the existing ones as well as the description in the docstring of RegressionTester.
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import pytest
import torch
from huggingface_hub import snapshot_download, upload_folder
from torch import nn
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from transformers.pytorch_utils import Conv1D
import peft
from peft import (
AdaLoraConfig,
BOFTConfig,
IA3Config,
LNTuningConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
PeftModel,
VBLoRAConfig,
VeraConfig,
get_peft_model,
)
from peft.utils import infer_device
PEFT_VERSION = peft.__version__
REGRESSION_DIR = tempfile.mkdtemp(prefix="peft_regression_")
HF_TOKEN = os.environ.get("HF_TOKEN")
# the repo has to be created manually once, it is not automatically created
HF_REPO = "peft-internal-testing/regression-tests"
@pytest.fixture(scope="session", autouse=True)
def setup_tearndown():
# Use a pytest session-scoped fixture to setup and teardown exactly once per session. AFAICT, unittest does not
# provide such a feature
# download regression artifacts from Hugging Face Hub at the start
snapshot_download(
repo_id=HF_REPO,
local_dir=REGRESSION_DIR,
# Don't use symlink, because this prevents us from properly cleaning up the files once finished
local_dir_use_symlinks=False,
)
yield
# delete regression artifacts at the end of the test session; optionally, upload them first if in creation mode
creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False"))
if creation_mode:
# upload the regression directory to Hugging Face Hub, will overwrite by default
upload_folder(
repo_id=HF_REPO,
folder_path=REGRESSION_DIR,
token=HF_TOKEN,
)
shutil.rmtree(REGRESSION_DIR)
def strtobool(val):
"""Copied from distutils.util"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {val!r}")
# same as in ..testing_utils.py but cannot be imported
def require_torch_gpu(test_case):
"""
Decorator marking a test that requires a GPU. Will be skipped when no GPU is available.
    Copied from testing_utils.py.
"""
if not torch.cuda.is_available():
return unittest.skip("test requires GPU")(test_case)
else:
return test_case
# same as in ..testing_utils.py but cannot be imported
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not installed.
    Copied from testing_utils.py.
"""
try:
import bitsandbytes # noqa: F401
except ImportError:
return unittest.skip("test requires bitsandbytes")(test_case)
else:
return test_case
def save_output(output, name, force=False):
path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION)
filename = os.path.join(path, "output.pt")
if os.path.exists(filename) and not force:
return
if not os.path.exists(path):
os.makedirs(path)
if os.path.exists(filename) and force:
print(f"Overriding existing output in {filename}", file=sys.stderr)
torch.save(output, filename)
def save_model(model, name, force=False):
path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION)
filename = os.path.join(path, peft.utils.SAFETENSORS_WEIGHTS_NAME)
if os.path.exists(filename) and not force:
return
if not os.path.exists(path):
os.makedirs(path)
if os.path.exists(filename) and force:
print(f"Overriding existing model in {path}", file=sys.stderr)
model.save_pretrained(path)
def load_output(name):
filename = os.path.join(REGRESSION_DIR, name, "output.pt")
return torch.load(filename, map_location=infer_device())
@pytest.mark.regression
class RegressionTester(unittest.TestCase):
"""Base class for regression testing
    Child classes must call assert_results_equal_or_store and pass the model output, as well as a unique name that
    describes the setting (e.g. "lora_opt-350m_bnb_4bit"). They also need to implement get_output(model) to get the
    model output, and load_base_model() to load the base model. Don't forget to fix the seed in load_base_model.
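    A minimal sketch of a child class (the base model and adapter config are illustrative):
    ```py
    >>> class TestMyModel(RegressionTester):
    ...     def get_output(self, model):
    ...         input = torch.arange(90).reshape(9, 10).to(self.torch_device)
    ...         with torch.inference_mode():
    ...             return model(input)
    ...     def load_base_model(self):
    ...         self.fix_seed()
    ...         return MyBaseModel().to(self.torch_device)  # MyBaseModel is a placeholder
    ...     def test_lora(self):
    ...         config = LoraConfig(r=8, init_lora_weights=False, target_modules=["lin0"])
    ...         model = get_peft_model(self.load_base_model(), config)
    ...         self.assert_results_equal_or_store(model, "lora_my_model")
    ```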
"""
torch_device = infer_device()
def setUp(self):
self.tol = 1e-4
self.creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False"))
self.force_mode = strtobool(os.environ.get("REGRESSION_FORCE_MODE", "False"))
if self.force_mode and not self.creation_mode:
raise RuntimeError("REGRESSION_FORCE_MODE can only be used together with REGRESSION_CREATION_MODE")
if self.creation_mode:
self.check_clean_git_status(self.force_mode)
if HF_TOKEN is None:
raise RuntimeError("HF_TOKEN environment variable must be set in creation mode")
def fix_seed(self):
torch.manual_seed(0)
def check_clean_git_status(self, force):
"""Ensure that worktree is not dirty and version tag is checked out"""
# check that the worktree is clean
try:
subprocess.check_output(["git", "diff", "--quiet", "HEAD"])
except subprocess.CalledProcessError as exc:
if force:
print("Overriding despite dirty git worktree", file=sys.stderr)
else:
raise RuntimeError("Git worktree is dirty") from exc
# check that the commit is tagged
try:
subprocess.check_output(["git", "describe", "--exact-match", "HEAD"])
except subprocess.CalledProcessError as exc:
if force:
print("Overriding despite non-tagged commit", file=sys.stderr)
else:
raise RuntimeError("Git commit is not tagged") from exc
def assert_results_equal_or_store(self, model, name):
"""Check if the outputs are the same or save the outputs if in creation mode."""
if not self.creation_mode: # normal regression testing mode
self._assert_results_equal(name)
else:
output = self.get_output(model)
if not torch.isfinite(output).all():
raise RuntimeError(f"Model output for {name} is not finite")
output2 = self.get_output(model)
if not torch.allclose(output, output2):
raise RuntimeError(f"Model output for {name} is not deterministic")
save_output(output, name, force=self.force_mode)
save_model(model, name, force=self.force_mode)
def _assert_results_equal(self, name):
path = os.path.join(REGRESSION_DIR, name)
versions = os.listdir(path)
for version in versions: # each directory corresponds to a version
output_loaded = load_output(os.path.join(name, version))
base_model = self.load_base_model()
model = PeftModel.from_pretrained(base_model, os.path.join(path, version))
output = self.get_output(model)
assert torch.allclose(output_loaded, output, atol=self.tol, rtol=self.tol)
def get_output(self, model):
raise NotImplementedError
def load_base_model(self):
raise NotImplementedError
##############
# TEST CASES #
##############
class TestMlp(RegressionTester):
def get_output(self, model):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
with torch.inference_mode():
output = model(input)
return output
def load_base_model(self):
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.lin1 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
X = self.sm(X)
return X
self.fix_seed()
return MLP().to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_mlp")
def test_lora_dora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
use_dora=True,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_dora_mlp")
def test_adalora(self):
base_model = self.load_base_model()
config = AdaLoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
total_step=1,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_mlp")
def test_ia3(self):
base_model = self.load_base_model()
config = IA3Config(
init_ia3_weights=False,
target_modules=["lin0"],
feedforward_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_mlp")
def test_ia3_no_ff(self):
base_model = self.load_base_model()
config = IA3Config(
init_ia3_weights=False,
target_modules=["lin0"],
feedforward_modules=[],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_no_ff_mlp")
def test_loha(self):
# TODO
self.skipTest("Skipping LoHa for now because init is not seedable")
base_model = self.load_base_model()
config = LoHaConfig(
r=8,
init_weights=False,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "loha_mlp")
def test_lokr(self):
# TODO
self.skipTest("Skipping LoKr for now because init is not seedable")
base_model = self.load_base_model()
config = LoKrConfig(
r=8,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lokr_mlp")
def test_lora_modules_to_save(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
modules_to_save=["lin1"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_mlp_modules_to_save")
def test_boft(self):
base_model = self.load_base_model()
config = BOFTConfig(
boft_block_size=2,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "boft_mlp")
def test_ln_tuning(self):
base_model = self.load_base_model()
config = LNTuningConfig(target_modules=["lin0"])
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ln_tuning_mlp")
def test_vera_tuning(self):
base_model = self.load_base_model()
config = VeraConfig(target_modules=["lin0"])
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "vera_tuning_mlp")
def test_vblora_tuning(self):
base_model = self.load_base_model()
config = VBLoRAConfig(
vector_length=1,
num_vectors=2,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "vblora_tuning_mlp")
class TestLoraEmbConv1D(RegressionTester):
def get_output(self, model):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
with torch.inference_mode():
output = model(input)
return output
def load_base_model(self):
class ModelEmbConv1D(nn.Module):
def __init__(self):
super().__init__()
self.emb = nn.Embedding(100, 5)
self.conv1d = Conv1D(1, 5)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.emb(X)
X = self.conv1d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
self.fix_seed()
return ModelEmbConv1D().to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["emb", "conv1d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_emb_conv1d")
class TestLoraConv2D(RegressionTester):
def get_output(self, model):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
with torch.inference_mode():
output = model(input)
return output
def load_base_model(self):
class ModelConv2D(nn.Module):
def __init__(self):
super().__init__()
self.conv2d = nn.Conv2d(5, 10, 3)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float().reshape(2, 5, 3, 3)
X = self.conv2d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
self.fix_seed()
return ModelConv2D().to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_conv2d")
def test_ia3(self):
base_model = self.load_base_model()
config = IA3Config(
init_ia3_weights=False,
target_modules=["conv2d"],
feedforward_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_conv2d")
def test_loha(self):
# TODO
self.skipTest("Skipping LoHa for now because init is not seedable")
base_model = self.load_base_model()
config = LoHaConfig(
r=8,
init_weights=False,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "loha_conv2d")
def test_lokr(self):
# TODO
self.skipTest("Skipping LoKr for now because init is not seedable")
base_model = self.load_base_model()
config = LoKrConfig(
r=8,
init_weights=False,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lokr_conv2d")
def test_boft(self):
base_model = self.load_base_model()
config = BOFTConfig(
boft_block_size=3,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "boft_conv2d")
class TestOpt(RegressionTester):
def get_output(self, model):
input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device)
with torch.inference_mode():
output = model(input).logits
return output
def load_base_model(self):
self.fix_seed()
return AutoModelForCausalLM.from_pretrained("facebook/opt-350m").to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_opt-350m")
def test_adalora(self):
base_model = self.load_base_model()
config = AdaLoraConfig(
r=8,
init_lora_weights=False,
total_step=1,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_opt-350m")
def test_ia3(self):
base_model = self.load_base_model()
config = IA3Config(init_ia3_weights=False)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_opt-350m")
@require_torch_gpu
@require_bitsandbytes
class TestOpt8bitBnb(RegressionTester):
def get_output(self, model):
input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device)
with torch.inference_mode():
output = model(input).logits
return output
def load_base_model(self):
self.fix_seed()
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-350m",
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
return model
def test_lora_8bit(self):
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_8bit")
def test_adalora(self):
# TODO
self.skipTest(
"Skipping AdaLora for now, getting TypeError: unsupported operand type(s) for +=: 'dict' and 'Tensor'"
)
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = AdaLoraConfig(
init_r=6,
target_r=4,
tinit=50,
tfinal=100,
total_step=200,
deltaT=5,
beta1=0.3,
beta2=0.3,
orth_reg_weight=0.2,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_opt-350m_8bit")
@require_torch_gpu
@require_bitsandbytes
class TestOpt4bitBnb(RegressionTester):
def get_output(self, model):
input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device)
with torch.inference_mode():
output = model(input).logits
return output
def load_base_model(self):
self.fix_seed()
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=False,
bnb_4bit_compute_dtype=torch.float32,
)
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-350m",
quantization_config=bnb_config,
torch_dtype=torch.float32,
)
return model
def test_lora_4bit(self):
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_4bit")
def test_adalora(self):
# TODO
self.skipTest("Skipping AdaLora for now because of a bug, see #1113")
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = AdaLoraConfig(
init_r=6,
target_r=4,
tinit=50,
tfinal=100,
total_step=200,
deltaT=5,
beta1=0.3,
beta2=0.3,
orth_reg_weight=0.2,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_opt-350m_4bit")
| peft/tests/regression/test_regression.py/0 | {
"file_path": "peft/tests/regression/test_regression.py",
"repo_id": "peft",
"token_count": 11016
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import platform
import re
from collections import defaultdict
from contextlib import contextmanager
from copy import deepcopy
from unittest.mock import patch
import pytest
import torch
from datasets import Dataset, load_dataset
from huggingface_hub.utils import reset_sessions
from safetensors.torch import load_file
from scipy import stats
from torch import nn
from torch.utils.data import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import (
AdaLoraConfig,
EvaConfig,
IA3Config,
LoftQConfig,
LoKrConfig,
LoraConfig,
PeftMixedModel,
PeftModel,
PeftModelForCausalLM,
PeftModelForFeatureExtraction,
PeftModelForQuestionAnswering,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
PromptTuningConfig,
VBLoRAConfig,
VeraConfig,
get_eva_state_dict,
get_peft_model,
initialize_lora_eva_weights,
inject_adapter_in_model,
set_peft_model_state_dict,
)
from peft.mapping import PEFT_TYPE_TO_PREFIX_MAPPING
from peft.tuners.lora.config import CordaConfig
from peft.tuners.lora.corda import preprocess_corda
from peft.tuners.lora.layer import LoraLayer
from peft.utils import infer_device
from peft.utils.hotswap import hotswap_adapter, prepare_model_for_compiled_hotswap
class TestLoraInitialization:
"""Test class to check the initialization of LoRA adapters."""
torch_device = infer_device()
def get_uniform(self, amin, amax, size=(10000,)):
unif = torch.distributions.uniform.Uniform(amin, amax)
samples = unif.sample(size)
return samples
def get_normal(self, mean, std, size=(10000,)):
normal = torch.distributions.normal.Normal(mean, std)
samples = normal.sample(size)
return samples
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# choose a large weight so that averages are close to expected values
self.linear = nn.Linear(1000, 1000)
self.embed = nn.Embedding(1000, 1000)
self.conv2d = nn.Conv2d(100, 100, 3)
def forward(self, x):
x_int = (100 * x).int()
x_4d = x.flatten().reshape(1, 100, 10, 10)
return self.linear(x), self.embed(x_int), self.conv2d(x_4d)
return MyModule().eval().to(self.torch_device)
@pytest.fixture
def data(self):
return torch.rand(10, 1000).to(self.torch_device)
def test_lora_linear_init_default(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["linear"])
model = get_peft_model(model, config)
weight_A = model.linear.lora_A["default"].weight
weight_B = model.linear.lora_B["default"].weight
# use statistical test to check if weight A is from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a normal distribution
normal = self.get_normal(weight_A.mean().item(), weight_A.std().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_linear_init_gaussian(self):
# use gaussian init
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["linear"], init_lora_weights="gaussian")
model = get_peft_model(model, config)
weight_A = model.linear.lora_A["default"].weight
weight_B = model.linear.lora_B["default"].weight
# use statistical test to check if weight A is from a normal distribution
normal = self.get_normal(0.0, 1 / config.r)
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_linear_false(self):
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["linear"], init_lora_weights=False)
model = get_peft_model(model, config)
weight_B = model.linear.lora_B["default"].weight
# with init_lora_weights=False, weight B should *not* be zero. We don't care so much about the actual values
# as long as they are not zero, in order to avoid identity transformation.
assert not torch.allclose(weight_B, torch.zeros_like(weight_B))
def test_lora_embedding_default(self):
# embedding is initialized as a normal distribution, not kaiming uniform
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["embed"])
model = get_peft_model(model, config)
weight_A = model.embed.lora_embedding_A["default"]
weight_B = model.embed.lora_embedding_B["default"]
# use statistical test to check if weight B is from a normal distribution
normal = self.get_normal(0.0, 1.0)
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight B is *not* from a uniform distribution
unif = self.get_uniform(weight_B.min().item(), weight_B.max().item())
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight A is zero
assert (weight_A == 0.0).all()
def test_lora_embedding_gaussian(self):
# embedding does not change with init_lora_weights="gaussian" vs True
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["embed"], init_lora_weights="gaussian")
model = get_peft_model(model, config)
weight_A = model.embed.lora_embedding_A["default"]
weight_B = model.embed.lora_embedding_B["default"]
# use statistical test to check if weight B is from a normal distribution
normal = self.get_normal(0.0, 1.0)
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight B is *not* from a uniform distribution
unif = self.get_uniform(weight_B.min().item(), weight_B.max().item())
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight A is zero
assert (weight_A == 0.0).all()
def test_lora_embedding_false(self):
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["embed"], init_lora_weights=False)
model = get_peft_model(model, config)
        weight_A = model.embed.lora_embedding_A["default"]
# with init_lora_weights=False, weight A should *not* be zero. We don't care so much about the actual values
# as long as they are not zero, in order to avoid identity transformation.
assert not torch.allclose(weight_A, torch.zeros_like(weight_A))
def test_lora_conv2d_default(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["conv2d"])
model = get_peft_model(model, config)
weight_A = model.conv2d.lora_A["default"].weight
weight_B = model.conv2d.lora_B["default"].weight
# use statistical test to check if weight A is from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a normal distribution
normal = self.get_normal(weight_A.mean().item(), weight_A.std().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_conv2d_init_gaussian(self):
# use gaussian init
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["conv2d"], init_lora_weights="gaussian")
model = get_peft_model(model, config)
weight_A = model.conv2d.lora_A["default"].weight
weight_B = model.conv2d.lora_B["default"].weight
# use statistical test to check if weight A is from a normal distribution
normal = self.get_normal(0.0, 1 / config.r)
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_conv2d_false(self):
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["conv2d"], init_lora_weights=False)
model = get_peft_model(model, config)
weight_B = model.conv2d.lora_B["default"].weight
# with init_lora_weights=False, weight B should *not* be zero. We don't care so much about the actual values
# as long as they are not zero, in order to avoid identity transformation.
assert not torch.allclose(weight_B, torch.zeros_like(weight_B))
def test_lora_scaling_default(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=False
config = LoraConfig(target_modules=["linear", "embed", "conv2d"], lora_alpha=3, r=16, use_rslora=False)
model = get_peft_model(model, config)
expected_scaling = config.lora_alpha / config.r
assert model.linear.scaling["default"] == expected_scaling
assert model.embed.scaling["default"] == expected_scaling
assert model.conv2d.scaling["default"] == expected_scaling
# testcase for bugfix for issue 2194
def test_pattern_override(self):
torch.manual_seed(0)
layer = self.get_model()
model = nn.Sequential(layer, layer)
config = LoraConfig(
target_modules=["linear"],
lora_alpha=1,
r=8,
use_rslora=False,
rank_pattern={"linear": 8},
alpha_pattern={"0.linear": 2},
)
model = get_peft_model(model, config)
scaling_with_rank_pattern = model.model[0].linear.scaling
layer = self.get_model()
model = nn.Sequential(layer, layer)
config = LoraConfig(
target_modules=["linear"], lora_alpha=1, r=8, use_rslora=False, alpha_pattern={"0.linear": 2}
)
model = get_peft_model(model, config)
scaling_without_rank_pattern = model.model[0].linear.scaling
assert scaling_with_rank_pattern == scaling_without_rank_pattern
def test_lora_pissa_linear_init_default(self, data):
model = self.get_model()
output = model(data)[0]
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"])
peft_model = get_peft_model(deepcopy(model), config)
assert torch.allclose(output, peft_model(data)[0], atol=1e-06)
config = LoraConfig(init_lora_weights="pissa_niter_16", target_modules=["linear"])
peft_model = get_peft_model(deepcopy(model), config)
assert torch.allclose(output, peft_model(data)[0], atol=1e-06)
def test_lora_olora_linear_init_default(self, data):
model = self.get_model()
output = model(data)[0]
# Both OLoRA and olora should work
config = LoraConfig(init_lora_weights="OLoRA", target_modules=["linear"])
peft_model = get_peft_model(deepcopy(model), config)
assert torch.allclose(output, peft_model(data)[0], atol=1e-06)
def test_lora_pissa_conversion_same_output_after_loading(self, data, tmp_path):
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_config_keys_before = list(peft_model.peft_config.keys())
peft_config_dict_before = peft_model.peft_config["default"].to_dict()
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
peft_config_keys_after = list(peft_model.peft_config.keys())
peft_config_dict_after = peft_model.peft_config["default"].to_dict()
assert peft_config_keys_before == peft_config_keys_after
assert peft_config_dict_before == peft_config_dict_after
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_lora_pissa_conversion_same_output_after_loading_with_rank_pattern(self, data, tmp_path):
# same as above, but using rank_pattern
model = self.get_model()
output_base = model(data)[0]
# use rank_pattern here; note that since there is only a single linear layer, r is completely overridden
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8, rank_pattern={"linear": 32})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 32
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 64
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_lora_pissa_conversion_same_output_after_loading_with_alpha_pattern(self, data, tmp_path):
# same as above, but using alpha_pattern
model = self.get_model()
output_base = model(data)[0]
# use alpha_pattern here; note that since there is only a single linear layer, lora_alpha is completely
# overridden
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], alpha_pattern={"linear": 5})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 5 / 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
assert model_converted.base_model.model.linear.scaling["default"] == 10 / 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_lora_pissa_conversion_same_output_after_loading_with_rslora(self, data, tmp_path):
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8, use_rslora=True)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 8 / (8**0.5)
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# same scale as before with a little bit of floating point imprecision
assert model_converted.base_model.model.linear.scaling["default"] == pytest.approx(8 / (8**0.5))
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_pissa_rank_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="pissa", target_modules=["linear"], r=8, rank_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "pissa-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
def test_pissa_alpha_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="pissa", target_modules=["linear"], r=8, alpha_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "pissa-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
def test_olora_conversion_same_output_after_loading(self, data, tmp_path):
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], r=8)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_config_keys_before = list(peft_model.peft_config.keys())
peft_config_dict_before = peft_model.peft_config["default"].to_dict()
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
peft_config_keys_after = list(peft_model.peft_config.keys())
peft_config_dict_after = peft_model.peft_config["default"].to_dict()
assert peft_config_keys_before == peft_config_keys_after
assert peft_config_dict_before == peft_config_dict_after
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_conversion_same_output_after_loading_with_rank_pattern(self, data, tmp_path):
# same as above, but using rank_pattern
model = self.get_model()
output_base = model(data)[0]
# use rank_pattern here; note that since there is only a single linear layer, r is completely overridden
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], r=8, rank_pattern={"linear": 32})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 32
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 64
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_conversion_same_output_after_loading_with_alpha_pattern(self, data, tmp_path):
# same as above, but using alpha_pattern
model = self.get_model()
output_base = model(data)[0]
# use alpha_pattern here; note that since there is only a single linear layer, lora_alpha is completely
# overridden
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], alpha_pattern={"linear": 5})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 5 / 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
assert model_converted.base_model.model.linear.scaling["default"] == 10 / 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_conversion_same_output_after_loading_with_rslora(self, data, tmp_path):
        # same as above, but using use_rslora
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], r=8, use_rslora=True)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 8 / (8**0.5)
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# same scale as before with a little bit of floating point imprecision
assert model_converted.base_model.model.linear.scaling["default"] == pytest.approx(8 / (8**0.5))
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_rank_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="olora", target_modules=["linear"], r=8, rank_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "olora-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
def test_olora_alpha_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="olora", target_modules=["linear"], r=8, alpha_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "olora-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
@pytest.mark.parametrize(
"config_kwargs, should_warn",
[
# no warning
({"init_lora_weights": "pissa", "target_modules": ["linear"]}, False),
({"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"]}, False),
({"init_lora_weights": "olora", "target_modules": ["linear"]}, False),
({"init_lora_weights": "pissa", "target_modules": ["linear"], "use_rslora": True}, False),
({"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"], "use_rslora": True}, False),
({"init_lora_weights": "olora", "target_modules": ["linear"], "use_rslora": True}, False),
({"init_lora_weights": "pissa", "target_modules": ["linear"], "rank_pattern": {"linear": 8}}, False),
(
{"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"], "rank_pattern": {"linear": 8}},
False,
),
({"init_lora_weights": "olora", "target_modules": ["linear"], "rank_pattern": {"linear": 8}}, False),
({"init_lora_weights": "pissa", "target_modules": ["linear"], "alpha_pattern": {"linear": 8}}, False),
(
{"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"], "alpha_pattern": {"linear": 8}},
False,
),
({"init_lora_weights": "olora", "target_modules": ["linear"], "alpha_pattern": {"linear": 8}}, False),
# warning
(
{
"init_lora_weights": "pissa",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa_niter_3",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "olora",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa",
"target_modules": ["linear"],
"use_rslora": True,
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa_niter_3",
"target_modules": ["linear"],
"use_rslora": True,
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "olora",
"target_modules": ["linear"],
"use_rslora": True,
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa_niter_3",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "olora",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
"alpha_pattern": {"linear": 8},
},
True,
),
],
)
def test_lora_config_pissa_olora_warns(self, config_kwargs, should_warn, recwarn):
# Using post training conversion of modified base weights to restore their initial values (PiSSA, OLoRA) cannot
# be correctly done when using rslora + rank_pattern/alpha_pattern. We can't really know if the user intends
# this when they'll eventually call save_pretrained (i.e. if they'll pass
        # path_initial_model_for_weight_conversion). Therefore, we only warn but don't raise an error here.
msg = re.escape("Using Rank-Stabilized LoRA with rank_pattern/alpha_pattern and post-training conversion")
if should_warn:
LoraConfig(**config_kwargs)
assert len(recwarn.list) == 1
with pytest.warns(UserWarning, match=msg):
LoraConfig(**config_kwargs)
else:
LoraConfig(**config_kwargs)
assert not recwarn.list
@pytest.mark.parametrize("init_method", ["pissa", "olora"])
@pytest.mark.parametrize("pissa_olora_loaded_first", [False, True])
def test_load_pissa_olora_with_other_adapter_warns(self, init_method, pissa_olora_loaded_first, recwarn, tmp_path):
# Since PiSSA/OLoRA modifies the base weights, it should not be combined with other adapters. Check for a
# warning. See #2184.
        # create an adapter without PiSSA/OLoRA
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_id)
model = get_peft_model(model, LoraConfig(init_lora_weights=True))
model.save_pretrained(tmp_path / "adapter0")
del model
# create a model with PiSSA/OLoRA
model = AutoModelForCausalLM.from_pretrained(model_id)
model = get_peft_model(model, LoraConfig(init_lora_weights=init_method))
model.save_pretrained(tmp_path / "adapter1")
del model
# load the model
if pissa_olora_loaded_first:
path0, path1 = tmp_path / "adapter1", tmp_path / "adapter0"
else:
path0, path1 = tmp_path / "adapter0", tmp_path / "adapter1"
model = AutoModelForCausalLM.from_pretrained(model_id)
model = PeftModel.from_pretrained(model, path0)
model = model.load_adapter(path1, adapter_name="other")
if init_method == "pissa":
msg = "PiSSA changes the base weights of the model and should thus not be used with other adapters"
else:
msg = "OLoRA changes the base weights of the model and should thus not be used with other adapters"
assert any(str(w.message).startswith(msg) for w in recwarn.list)
def test_lora_rslora_scaling(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=True
config = LoraConfig(target_modules=["linear", "embed", "conv2d"], lora_alpha=3, r=16, use_rslora=True)
model = get_peft_model(model, config)
expected_scaling = config.lora_alpha / (config.r**0.5)
assert model.linear.scaling["default"] == expected_scaling
assert model.embed.scaling["default"] == expected_scaling
assert model.conv2d.scaling["default"] == expected_scaling
def test_lora_default_scaling_pattern(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=False with rank and alpha pattern
config = LoraConfig(
target_modules=["linear", "embed", "conv2d"],
rank_pattern={"embed": 9, "conv2d": 16},
alpha_pattern={"linear": 11, "conv2d": 13},
lora_alpha=17,
r=25,
use_rslora=False,
)
model = get_peft_model(model, config)
expected_scaling = {
"linear": config.alpha_pattern["linear"] / config.r,
"embed": config.lora_alpha / config.rank_pattern["embed"],
"conv2d": config.alpha_pattern["conv2d"] / config.rank_pattern["conv2d"],
}
assert model.linear.scaling["default"] == expected_scaling["linear"]
assert model.embed.scaling["default"] == expected_scaling["embed"]
assert model.conv2d.scaling["default"] == expected_scaling["conv2d"]
def test_lora_rslora_scaling_pattern(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=True with rank and alpha pattern
config = LoraConfig(
target_modules=["linear", "embed", "conv2d"],
rank_pattern={"embed": 9, "conv2d": 16},
alpha_pattern={"linear": 11, "conv2d": 13},
lora_alpha=17,
r=25,
use_rslora=True,
)
model = get_peft_model(model, config)
expected_scaling = {
"linear": config.alpha_pattern["linear"] / (config.r**0.5),
"embed": config.lora_alpha / (config.rank_pattern["embed"] ** 0.5),
"conv2d": config.alpha_pattern["conv2d"] / (config.rank_pattern["conv2d"] ** 0.5),
}
assert model.linear.scaling["default"] == expected_scaling["linear"]
assert model.embed.scaling["default"] == expected_scaling["embed"]
assert model.conv2d.scaling["default"] == expected_scaling["conv2d"]
def test_lora_use_dora_linear(self, data):
# check that dora is a no-op when initialized
torch.manual_seed(0)
model = self.get_model()
output_base, _, _ = model(data)
        # enable DoRA on the linear layer
config = LoraConfig(target_modules=["linear"], use_dora=True)
model = get_peft_model(model, config)
with model.disable_adapter():
output_disabled, _, _ = model(data)
output_dora, _, _ = model(data)
assert torch.allclose(output_base, output_disabled)
assert torch.allclose(output_base, output_dora)
def test_lora_use_dora_linear_init_false(self, data):
# with init_lora_weights=False, dora should not be a no-op
torch.manual_seed(0)
model = self.get_model()
output_base, _, _ = model(data)
        # enable DoRA, with init_lora_weights=False so the adapter is not a no-op
config = LoraConfig(target_modules=["linear"], use_dora=True, init_lora_weights=False)
model = get_peft_model(model, config)
with model.disable_adapter():
output_disabled, _, _ = model(data)
output_dora, _, _ = model(data)
assert torch.allclose(output_base, output_disabled)
assert not torch.allclose(output_base, output_dora)
def test_lora_use_dora_with_megatron_core_raises(self):
megatron_config = {"does-not": "matter-here"}
with pytest.raises(ValueError, match="DoRA does not support megatron_core"):
LoraConfig(target_modules=["linear"], use_dora=True, megatron_config=megatron_config)
@pytest.fixture
def mha_cls(self):
class ModelMha(nn.Module):
def __init__(self, kdim=None, vdim=None):
super().__init__()
self.mha = nn.MultiheadAttention(10, 2, kdim=kdim, vdim=vdim)
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X, _ = self.mha(X, X, X)
X = self.lin0(X)
X = self.sm(X)
return X
return ModelMha
def test_mha_load_init_model_first(self, mha_cls):
# This test used to fail and require a workaround, for more context, see:
# https://github.com/huggingface/peft/pull/1324#issuecomment-2252473980
# The workaround was that _restore_weights had to be called manually on lora.MHA layers in order to make loading
# the state dict work. With recent changes, this workaround is no longer required, so that test has been
# deleted.
inputs = torch.rand(10, 10, 10)
model = mha_cls()
config = LoraConfig(target_modules=["mha"], init_lora_weights=False)
model = get_peft_model(model, config).eval()
restore_state_dict = {k: v.detach().cpu() for k, v in model.state_dict().items()}
del model
model = mha_cls()
model = get_peft_model(model, config)
# the workaround used to be:
# for module in model.modules():
# if isinstance(module, peft.tuners.lora.layer.MultiheadAttention):
# module._restore_weights()
model(inputs)
model.load_state_dict(restore_state_dict)
def test_mha_with_separate_qkv_embed_raises(self, mha_cls):
# passing different kdim and vdim results in separate parameters for q, k, v, which is not supported (yet)
model = mha_cls(kdim=20, vdim=30)
config = LoraConfig(target_modules=["mha"])
msg = "Only same embed for query/key/value is supported as of now for MultiheadAttention"
with pytest.raises(ValueError, match=msg):
get_peft_model(model, config)
def test_mha_with_dora_raises(self, mha_cls):
model = mha_cls()
config = LoraConfig(target_modules=["mha"], use_dora=True)
msg = re.escape("MultiheadAttention does not support DoRA (yet), please set use_dora to False")
with pytest.raises(ValueError, match=msg):
get_peft_model(model, config)
def test_mha_exposes_attributes(self, mha_cls):
# MHA requires a bunch of attributes to be exposed, try to check them exhaustively here
model = mha_cls()
embed_dim = model.mha.embed_dim
kdim = model.mha.kdim
vdim = model.mha.vdim
qkv_same_embed_dim = model.mha._qkv_same_embed_dim
num_heads = model.mha.num_heads
dropout = model.mha.dropout
batch_first = model.mha.batch_first
head_dim = model.mha.head_dim
in_proj_weight = model.mha.in_proj_weight
in_proj_bias = model.mha.in_proj_bias
out_proj = model.mha.out_proj
bias_k = model.mha.bias_k
bias_v = model.mha.bias_v
add_zero_attn = model.mha.add_zero_attn
config = LoraConfig(target_modules=["mha"])
peft_model = get_peft_model(model, config)
assert peft_model.base_model.mha.embed_dim == embed_dim
assert peft_model.base_model.mha.kdim == kdim
assert peft_model.base_model.mha.vdim == vdim
assert peft_model.base_model.mha._qkv_same_embed_dim == qkv_same_embed_dim
assert peft_model.base_model.mha.num_heads == num_heads
assert peft_model.base_model.mha.dropout == dropout
assert peft_model.base_model.mha.batch_first == batch_first
assert peft_model.base_model.mha.head_dim == head_dim
if in_proj_weight is not None:
assert torch.allclose(peft_model.base_model.mha.in_proj_weight, in_proj_weight)
else:
assert peft_model.base_model.mha.in_proj_weight is None
if in_proj_bias is not None:
assert torch.allclose(peft_model.base_model.mha.in_proj_bias, in_proj_bias)
else:
assert peft_model.base_model.mha.in_proj_bias is None
assert peft_model.base_model.mha.out_proj is out_proj
if bias_k is not None:
assert torch.allclose(peft_model.base_model.mha.bias_k, bias_k)
else:
assert peft_model.base_model.mha.bias_k is None
if bias_v is not None:
assert torch.allclose(peft_model.base_model.mha.bias_v, bias_v)
else:
assert peft_model.base_model.mha.bias_v is None
assert peft_model.base_model.mha.add_zero_attn == add_zero_attn
def test_mha_merge_masks_method(self, mha_cls):
# MHA requires a merge_masks method to be exposed, check that it works
model = mha_cls()
config = LoraConfig(target_modules=["mha"])
peft_model = get_peft_model(model, config)
attn_mask = torch.randint(0, 2, (10, 10))
key_padding_mask = torch.randint(0, 2, (10, 10))
query = torch.rand(10, 10, 10)
merged_mask0, mask_type0 = model.mha.merge_masks(attn_mask, key_padding_mask, query)
merged_mask1, mask_type1 = peft_model.base_model.mha.merge_masks(attn_mask, key_padding_mask, query)
assert torch.allclose(merged_mask0, merged_mask1)
assert mask_type0 == mask_type1
def test_lora_with_bias_extra_params(self):
        # compare LoRA with lora_bias=False vs lora_bias=True
model = self.get_model()
config = LoraConfig(target_modules=["linear", "conv2d"], lora_bias=False)
model_no_bias = get_peft_model(model, config)
model = self.get_model()
config = LoraConfig(target_modules=["linear", "conv2d"], lora_bias=True)
model_bias = get_peft_model(model, config)
# check that bias for LoRA B is set
assert model_no_bias.base_model.model.linear.lora_B["default"].bias is None
assert model_bias.base_model.model.linear.lora_B["default"].bias.shape == (1000,)
assert model_no_bias.base_model.model.conv2d.lora_B["default"].bias is None
assert model_bias.base_model.model.conv2d.lora_B["default"].bias.shape == (100,)
# check that the same params are present except for the extra bias term
params_no_bias = {name for name, _ in model_no_bias.named_parameters()}
params_bias = {name for name, _ in model_bias.named_parameters()}
extra_params = {
"base_model.model.linear.lora_B.default.bias",
"base_model.model.conv2d.lora_B.default.bias",
}
assert params_bias - params_no_bias == extra_params
assert params_no_bias.issubset(params_bias)
def test_lora_with_bias_embedding_raises(self):
# lora with lora_bias=True is not supported for embedding layers
model = self.get_model()
config = LoraConfig(target_modules=["embed"], lora_bias=True)
msg = "lora_bias=True is not supported for Embedding"
with pytest.raises(ValueError, match=msg):
get_peft_model(model, config)
@pytest.mark.parametrize(
"extra_kwargs",
[
{"use_dora": True},
{"init_lora_weights": "eva"},
{"init_lora_weights": "gaussian"},
{"init_lora_weights": "loftq", "loftq_config": LoftQConfig()},
{"init_lora_weights": "olora"},
{"init_lora_weights": "pissa"},
{"init_lora_weights": "pissa_niter_3"},
],
)
def test_lora_with_bias_incompatible_arguments(self, extra_kwargs):
# some arguments don't work in conjunction with lora_bias and should raise
# just check the common chunk of the error message
msg = "The argument lora_bias=True is"
with pytest.raises(ValueError, match=msg):
LoraConfig(target_modules=["linear"], lora_bias=True, **extra_kwargs)
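# Illustrative sketch (editor's addition, not part of the test suite): the scaling factor that the tests in
# TestLoraInitialization assert on. By default scaling = lora_alpha / r; with rank-stabilized LoRA it is
# lora_alpha / sqrt(r); rank_pattern/alpha_pattern substitute per-layer values for r/lora_alpha. The helper
# name below is ours, not a PEFT API.
def _expected_lora_scaling(lora_alpha: float, r: int, use_rslora: bool = False) -> float:
    # mirrors the expected values in test_lora_scaling_default and test_lora_rslora_scaling
    return lora_alpha / (r**0.5) if use_rslora else lora_alpha / r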
class TestLokrInitialization:
torch_device = infer_device()
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# Choose a large weight so that averages are close to expected values.
self.linear = nn.Linear(1000, 1000)
self.conv2d = nn.Conv2d(100, 100, 3)
def forward(self, x):
x_4d = x.flatten().reshape(1, 100, 10, 10)
return self.linear(x), self.conv2d(x_4d)
return MyModule().eval().to(self.torch_device)
@pytest.fixture
def data(self):
return torch.rand(10, 1000).to(self.torch_device)
def test_lokr_linear_init_default(self, data):
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)[0]
config = LoKrConfig(target_modules=["linear"])
model = get_peft_model(model, config)
output_after = model(data)[0]
assert torch.allclose(output_before, output_after)
def test_lokr_linear_init_false(self, data):
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)[0]
config = LoKrConfig(target_modules=["linear"], init_weights=False)
model = get_peft_model(model, config)
output_after = model(data)[0]
assert not torch.allclose(output_before, output_after)
def test_lokr_linear_init_lycoris(self, data):
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)[0]
config = LoKrConfig(target_modules=["linear"], init_weights="lycoris")
model = get_peft_model(model, config)
output_after = model(data)[0]
assert torch.allclose(output_before, output_after)
def test_lokr_conv2d_init_default(self, data):
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)[1]
config = LoKrConfig(target_modules=["conv2d"])
model = get_peft_model(model, config)
output_after = model(data)[1]
assert torch.allclose(output_before, output_after)
def test_lokr_conv2d_init_false(self, data):
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)[1]
config = LoKrConfig(target_modules=["conv2d"], init_weights=False)
model = get_peft_model(model, config)
output_after = model(data)[1]
assert not torch.allclose(output_before, output_after)
def test_lokr_conv2d_init_lycoris(self, data):
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)[1]
config = LoKrConfig(target_modules=["conv2d"], init_weights="lycoris")
model = get_peft_model(model, config)
output_after = model(data)[1]
assert torch.allclose(output_before, output_after)
class TestAdaLoraInitialization:
torch_device = infer_device()
def test_adalora_target_modules_set(self):
config = AdaLoraConfig(target_modules=["linear", "embed", "conv2d"], total_step=1)
assert config.target_modules == {"linear", "embed", "conv2d"}
def test_adalora_use_dora_raises(self):
with pytest.raises(ValueError, match="ADALORA does not support DoRA"):
AdaLoraConfig(use_dora=True, total_step=1)
def test_adalora_loftq_config_raises(self):
with pytest.raises(ValueError, match="ADALORA does not support LOFTQ"):
AdaLoraConfig(init_lora_weights="loftq", loftq_config={"loftq": "config"}, total_step=1)
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# choose a large weight so that averages are close to expected values
self.linear = nn.Linear(1000, 1000)
def forward(self, x):
return self.linear(x)
return MyModule().eval().to(self.torch_device)
@pytest.fixture
def data(self):
return torch.rand(10, 1000).to(self.torch_device)
def test_adalora_default_init_identity(self, data):
# default is True
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)
config = AdaLoraConfig(target_modules=["linear"], total_step=1)
model = get_peft_model(model, config)
output_after = model(data)
assert torch.allclose(output_before, output_after)
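# Illustrative sketch (editor's addition, not part of the test suite): the "identity at initialization"
# property that the LoRA/LoKr/AdaLoRA tests above assert. With the default init, one of the two low-rank
# factors is zero, so the adapter delta vanishes and the adapted layer reproduces the base layer exactly.
# The helper only illustrates the arithmetic and is not PEFT API.
def _adapted_linear_weight(base_weight, lora_A, lora_B, scaling):
    # effective weight of a LoRA-style adapted nn.Linear: W + scaling * (B @ A); zero delta if B is all zeros
    return base_weight + scaling * (lora_B @ lora_A)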
class TestPromptTuningInitialization:
torch_device = infer_device()
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# choose a large weight so that averages are close to expected values
self.linear = nn.Linear(1000, 1000)
self.embed = nn.Embedding(1000, 1000)
self.conv2d = nn.Conv2d(100, 100, 3)
def forward(self, x):
x_int = (100 * x).int()
x_4d = x.flatten().reshape(1, 100, 10, 10)
return self.linear(x), self.embed(x_int), self.conv2d(x_4d)
return MyModule().eval().to(self.torch_device)
def test_use_prompt_tuning_init_text_raises(self):
with pytest.raises(ValueError, match="When prompt_tuning_init='TEXT', tokenizer_name_or_path can't be None"):
PromptTuningConfig(prompt_tuning_init="TEXT", prompt_tuning_init_text="prompt tuning init text")
with pytest.raises(ValueError, match="When prompt_tuning_init='TEXT', prompt_tuning_init_text can't be None"):
PromptTuningConfig(prompt_tuning_init="TEXT", tokenizer_name_or_path="t5-base")
class TestVeraInitialization:
torch_device = infer_device()
def get_model(self):
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.lin1 = nn.Linear(20, 2, bias=bias)
def forward(self, X):
X = self.lin0(X)
X = self.lin1(X)
return X
return MLP().to(self.torch_device)
def test_vera_mixing_save_projection_raises(self):
# it is unclear what the right thing to do would be if some adapters save the projection weights and some don't
# so we better raise an error
config0 = VeraConfig(target_modules=["lin0"], init_weights=False, save_projection=True)
model = self.get_model()
model = get_peft_model(model, config0)
config1 = VeraConfig(target_modules=["lin0"], init_weights=False, save_projection=False)
msg = re.escape(
"VeRA projection weights must be saved for all adapters or none, but got multiple different values: "
"[False, True]"
)
with pytest.raises(ValueError, match=msg):
model.add_adapter("other", config1)
def test_vera_add_second_adapter_with_incompatible_input_shape(self):
config0 = VeraConfig(target_modules=["lin0"], r=8)
config1 = VeraConfig(target_modules=["lin1"])
base_model = self.get_model()
lin0_in_feat = base_model.lin0.in_features
lin1_in_feat = base_model.lin1.in_features
model = get_peft_model(base_model, config0)
# not full message but enough to identify the error
msg = f"vera_A has a size of {lin0_in_feat} but {lin1_in_feat} or greater is required"
with pytest.raises(ValueError, match=msg):
model.add_adapter("other", config1)
def test_vera_add_second_adapter_with_higher_rank(self):
rank0 = 123
rank1 = 456
config0 = VeraConfig(target_modules=["lin0"], r=rank0)
# second adapter has higher rank
config1 = VeraConfig(target_modules=["lin0"], r=rank1)
model = get_peft_model(self.get_model(), config0)
# not full message but enough to identify the error
msg = f"vera_A has a size of {rank0} but {rank1} or greater is required"
with pytest.raises(ValueError, match=msg):
model.add_adapter("other", config1)
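# Illustrative sketch (editor's addition, not part of the test suite): why the two VeRA tests above raise.
# VeRA shares one pair of frozen random projections (vera_A, vera_B) across all adapted layers, sized when the
# first adapter is created; a later adapter that needs a higher rank or a larger input dimension cannot reuse
# them. The helper only mirrors that constraint and is not PEFT API.
def _check_vera_projection_fits(existing_size: int, required_size: int, name: str = "vera_A") -> None:
    if existing_size < required_size:
        raise ValueError(f"{name} has a size of {existing_size} but {required_size} or greater is required")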
class TestVBLoraInitialization:
torch_device = infer_device()
def get_model(self):
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 30, bias=bias)
self.lin1 = nn.Linear(30, 2, bias=bias)
def forward(self, X):
X = self.lin0(X)
X = self.lin1(X)
return X
return MLP().to(self.torch_device)
def test_vblora_with_incompatible_vector_length_with_in_features(self):
vector_length = 3
model = self.get_model()
config = VBLoRAConfig(target_modules=["lin0"], vector_length=vector_length)
msg = f"`in_features` {model.lin0.in_features} must be divisible by `vector_length` {vector_length}"
with pytest.raises(ValueError, match=msg):
get_peft_model(model, config)
def test_vblora_with_incompatible_vector_length_with_out_features(self):
vector_length = 3
model = self.get_model()
config = VBLoRAConfig(target_modules=["lin1"], vector_length=vector_length)
msg = f"`out_features` {model.lin1.out_features} must be divisible by `vector_length` {vector_length}"
with pytest.raises(ValueError, match=msg):
get_peft_model(model, config)
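# Illustrative sketch (editor's addition, not part of the test suite): the constraint the VB-LoRA tests above
# exercise. VB-LoRA composes each low-rank factor from sub-vectors of length `vector_length` taken from a
# shared vector bank, so both in_features and out_features must be divisible by vector_length. The helper only
# mirrors that check and is not PEFT API.
def _check_vblora_divisibility(in_features: int, out_features: int, vector_length: int) -> None:
    for name, dim in (("in_features", in_features), ("out_features", out_features)):
        if dim % vector_length != 0:
            raise ValueError(f"`{name}` {dim} must be divisible by `vector_length` {vector_length}")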
class TestNoInfiniteRecursionDeepspeed:
# see #1892 for details
classes = [
PeftModel,
PeftMixedModel,
PeftModelForSequenceClassification,
PeftModelForQuestionAnswering,
PeftModelForTokenClassification,
PeftModelForCausalLM,
PeftModelForSeq2SeqLM,
PeftModelForFeatureExtraction,
]
@pytest.fixture
def wrap_init(self):
# emulates the wrapper from DeepSpeed
import functools
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
hasattr(self, "abc") # any hasattr will do
f(self, *args, **kwargs)
return wrapper
return decorator
@pytest.fixture
def model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 10)
# to emulate LMs:
self.prepare_inputs_for_generation = None
self._prepare_encoder_decoder_kwargs_for_generation = None
return MyModule()
@pytest.mark.parametrize("cls", classes)
def test_no_infinite_recursion(self, cls, model, wrap_init):
original_init = cls.__init__
try:
cls.__init__ = wrap_init(cls.__init__)
# this would trigger an infinite loop before the fix in 1892
cls(model, LoraConfig(target_modules=["linear"]))
finally:
# ensure there are no side effects of this test
cls.__init__ = original_init
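# Illustrative sketch (editor's addition, not part of the test suite): the failure mode guarded against above.
# A wrapper whose __getattr__ forwards to an inner model can recurse without bound when hasattr() is called
# inside __init__ (as DeepSpeed's wrapped __init__ does) before the inner attribute exists. A defensive
# pattern is to raise AttributeError for the forwarding attribute itself; this class is a generic sketch, not
# PEFT code.
class _SafeForwardingWrapper:
    def __init__(self, wrapped):
        object.__setattr__(self, "_wrapped", wrapped)

    def __getattr__(self, name):
        # only called when normal attribute lookup fails
        if name == "_wrapped":
            raise AttributeError(name)  # not set yet -> stop instead of recursing through __getattr__ again
        return getattr(self._wrapped, name)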
class TestLoadAdapterOfflineMode:
# make sure that PEFT honors offline mode
@contextmanager
def hub_offline_ctx(self):
# this is required to simulate offline mode, setting the env var dynamically inside the test does not work
# because the value is checked only once at the start of the session
with patch("huggingface_hub.constants.HF_HUB_OFFLINE", True):
reset_sessions()
yield
reset_sessions()
def test_load_from_hub_then_offline_model(self):
# this uses LoRA but it's the same mechanism for other methods
peft_model_id = "peft-internal-testing/gpt2-lora-random"
base_model = AutoModelForCausalLM.from_pretrained("gpt2")
# first ensure that the adapter model has been downloaded
PeftModel.from_pretrained(base_model, peft_model_id)
del base_model
base_model = AutoModelForCausalLM.from_pretrained("gpt2")
with self.hub_offline_ctx():
# does not raise
PeftModel.from_pretrained(base_model, peft_model_id)
class TestCustomModelConfigWarning:
# Check potential warnings when the user-provided base_model_name_or_path is overridden by PEFT. See #2001 for
# context. We use LoRA for this test but the same applies to other methods.
@pytest.fixture
def custom_module(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(10, 10)
return MyModule()
def test_no_warning_by_default_transformers_model(self, recwarn):
# first a sanity test that there is no warning by default when using a model from transformers
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
get_peft_model(model, LoraConfig())
for warning in recwarn.list:
assert "renamed" not in str(warning.message)
def test_no_warning_by_default_custom_model(self, custom_module, recwarn):
# same as above but with a custom model
get_peft_model(custom_module, LoraConfig(target_modules=["lin"]))
for warning in recwarn.list:
assert "renamed" not in str(warning.message)
def test_warning_name_transformers_model(self, recwarn):
# The base_model_name_or_path provided by the user is overridden.
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
custom_name = "custom_name"
get_peft_model(model, LoraConfig(base_model_name_or_path=custom_name))
msg = f"was renamed from '{custom_name}' to 'hf-internal-testing/tiny-random-OPTForCausalLM'"
assert any(msg in str(warning.message) for warning in recwarn.list)
def test_warning_name_custom_model(self, custom_module, recwarn):
custom_name = "custom_name"
get_peft_model(custom_module, LoraConfig(target_modules=["lin"], base_model_name_or_path=custom_name))
msg = f"was renamed from '{custom_name}' to 'None'"
assert any(msg in str(warning.message) for warning in recwarn.list)
def test_warning_name_custom_model_with_custom_name(self, custom_module, recwarn):
custom_name = "custom_name"
custom_module.name_or_path = "foobar"
get_peft_model(custom_module, LoraConfig(target_modules=["lin"], base_model_name_or_path=custom_name))
msg = f"was renamed from '{custom_name}' to 'foobar'"
assert any(msg in str(warning.message) for warning in recwarn.list)
class TestLowCpuMemUsage:
"""Test for the low CPU memory usage option for loading PEFT models.
Note that we have `test_load_model_low_cpu_mem_usage` in the custom model and stable diffusion tests. Those are
broad tests (i.e. testing all the supported PEFT methods) but not very deep (only testing if loading works and the
device is correctly set). The test class here goes deeper but only tests LoRA, as checking all PEFT methods would
be too much.
"""
# test on CPU and optionally on accelerator device
devices = ["cpu"]
_device = infer_device()
if _device != "cpu":
devices.append(_device)
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
def get_model(self):
return AutoModelForCausalLM.from_pretrained(self.model_id)
@pytest.fixture(scope="class")
def lora_config(self):
return LoraConfig(init_lora_weights=False, target_modules="all-linear")
@pytest.fixture(scope="class")
def lora_path(self, tmp_path_factory, lora_config):
torch.manual_seed(0)
tmp_path = tmp_path_factory.mktemp("lora")
model = self.get_model()
model = get_peft_model(model, lora_config)
model.save_pretrained(tmp_path)
return tmp_path
@pytest.fixture(scope="class")
def inputs(self):
return {"input_ids": torch.randint(0, 100, (1, 10)), "attention_mask": torch.ones(1, 10)}
@pytest.mark.parametrize("device", devices)
def test_from_pretrained_low_cpu_mem_usage_works(self, device, inputs, lora_path):
model = self.get_model().to(device)
inputs = {k: v.to(device) for k, v in inputs.items()}
model = PeftModel.from_pretrained(model, lora_path, torch_device=device).eval()
device_set_not_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_not_low_cpu_mem = model(**inputs).logits
del model
model = self.get_model().to(device)
model = PeftModel.from_pretrained(model, lora_path, low_cpu_mem_usage=True, torch_device=device).eval()
device_set_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_low_cpu_mem = model(**inputs).logits
assert device_set_low_cpu_mem == device_set_not_low_cpu_mem
assert torch.allclose(logits_low_cpu_mem, logits_not_low_cpu_mem)
@pytest.mark.parametrize("device", devices)
def test_load_adapter_low_cpu_mem_usage_works(self, device, inputs, lora_path, lora_config):
model = self.get_model().to(device)
inputs = {k: v.to(device) for k, v in inputs.items()}
torch.manual_seed(0)
model = get_peft_model(model, lora_config)
model.load_adapter(lora_path, adapter_name="other", torch_device=device)
model.set_adapter("other")
model.eval()
device_set_not_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_not_low_cpu_mem = model(**inputs).logits
del model
model = self.get_model().to(device)
torch.manual_seed(0)
model = get_peft_model(model, lora_config)
model.load_adapter(lora_path, adapter_name="other", low_cpu_mem_usage=True, torch_device=device)
model.set_adapter("other")
model.eval()
device_set_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_low_cpu_mem = model(**inputs).logits
assert device_set_low_cpu_mem == device_set_not_low_cpu_mem
assert torch.allclose(logits_low_cpu_mem, logits_not_low_cpu_mem)
@pytest.mark.parametrize("device", devices)
def test_get_peft_model_low_cpu_mem_usage_works(self, device, inputs):
# when calling get_peft_model, the PEFT weights will not be initialized on device but remain on meta
model = self.get_model().to(device)
model = get_peft_model(model, LoraConfig(target_modules="all-linear"), low_cpu_mem_usage=True)
devices_lora_weights = {p.device for n, p in model.named_parameters() if "lora_" in n}
expected = {torch.device("meta")}
assert devices_lora_weights == expected
@pytest.mark.parametrize("device", devices)
def test_get_peft_model_with_task_type_low_cpu_mem_usage_works(self, device, inputs):
# same as the previous test, but pass the task_type argument
model = self.get_model().to(device)
model = get_peft_model(
model, LoraConfig(target_modules="all-linear", task_type="CAUSAL_LM"), low_cpu_mem_usage=True
)
devices_lora_weights = {p.device for n, p in model.named_parameters() if "lora_" in n}
expected = {torch.device("meta")}
assert devices_lora_weights == expected
@pytest.mark.parametrize("device", devices)
def test_inject_adapter_low_cpu_mem_usage_works(self, device, inputs, lora_path, lora_config):
# external libs like transformers and diffusers use inject_adapter_in_model, let's check that this also works
model = self.get_model().to(device)
inputs = {k: v.to(device) for k, v in inputs.items()}
torch.manual_seed(0)
model = get_peft_model(model, lora_config)
model.load_adapter(lora_path, adapter_name="other", torch_device=device)
model.set_adapter("other")
model.eval()
device_set_not_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_not_low_cpu_mem = model(**inputs).logits
del model
torch.manual_seed(0)
model = self.get_model().to(device)
inject_adapter_in_model(lora_config, model, low_cpu_mem_usage=True)
device_set_before_loading = {p.device.type for p in model.parameters()}
# at this stage, lora weights are still on meta device
assert device_set_before_loading == {"meta", device}
state_dict = load_file(lora_path / "adapter_model.safetensors")
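# keys in the saved adapter state dict are prefixed with "base_model.model.", which
# inject_adapter_in_model does not add, so strip the prefix before loading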
remapped_dict = {}
prefix = "base_model.model."
for key, val in state_dict.items():
new_key = key[len(prefix) :]
remapped_dict[new_key] = val.to(device)
errors = set_peft_model_state_dict(model, remapped_dict, low_cpu_mem_usage=True)
# sanity check: no unexpected keys
assert not errors.unexpected_keys
model.eval()
device_set_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_low_cpu_mem = model(**inputs).logits
assert device_set_low_cpu_mem == device_set_not_low_cpu_mem
assert torch.allclose(logits_low_cpu_mem, logits_not_low_cpu_mem)
############################
# tests for PeftMixedModel #
############################
@pytest.mark.parametrize("device", devices)
def test_mixed_model_from_pretrained_low_cpu_mem_usage_works(self, device, inputs, lora_path):
model = self.get_model().to(device)
inputs = {k: v.to(device) for k, v in inputs.items()}
model = PeftMixedModel.from_pretrained(model, lora_path, torch_device=device).eval()
device_set_not_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_not_low_cpu_mem = model(**inputs).logits
del model
model = self.get_model().to(device)
model = PeftMixedModel.from_pretrained(model, lora_path, low_cpu_mem_usage=True, torch_device=device).eval()
device_set_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_low_cpu_mem = model(**inputs).logits
assert device_set_low_cpu_mem == device_set_not_low_cpu_mem
assert torch.allclose(logits_low_cpu_mem, logits_not_low_cpu_mem)
@pytest.mark.parametrize("device", devices)
def test_mixed_model_load_adapter_low_cpu_mem_usage_works(self, device, inputs, lora_path, lora_config):
model = self.get_model().to(device)
inputs = {k: v.to(device) for k, v in inputs.items()}
torch.manual_seed(0)
model = PeftModel.from_pretrained(model, lora_path)
model.load_adapter(lora_path, adapter_name="other", torch_device=device)
model.set_adapter("other")
model.eval()
device_set_not_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_not_low_cpu_mem = model(**inputs).logits
del model
model = self.get_model().to(device)
torch.manual_seed(0)
model = PeftModel.from_pretrained(model, lora_path)
model.load_adapter(lora_path, adapter_name="other", low_cpu_mem_usage=True, torch_device=device)
model.set_adapter("other")
model.eval()
device_set_low_cpu_mem = {p.device.type for p in model.parameters()}
logits_low_cpu_mem = model(**inputs).logits
assert device_set_low_cpu_mem == device_set_not_low_cpu_mem
assert torch.allclose(logits_low_cpu_mem, logits_not_low_cpu_mem)
def test_from_pretrained_missing_keys_warning(recwarn, tmp_path):
# For more context, see issue 2115
# When loading a PEFT adapter and we're missing a PEFT-specific weight, there should be a warning.
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
config = LoraConfig()
model = get_peft_model(model, config)
state_dict = model.state_dict()
# first, sanity check that there are no warnings if no key is missing
model.save_pretrained(tmp_path)
del model
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
model = PeftModel.from_pretrained(model, tmp_path)
msg = "Found missing adapter keys"
assert not any(msg in str(w.message) for w in recwarn.list)
# remove a key from the state_dict
missing_key = "base_model.model.model.decoder.layers.0.self_attn.v_proj.lora_A.default.weight"
def new_state_dict():
return {k: v for k, v in state_dict.items() if k != missing_key}
model.state_dict = new_state_dict
model.save_pretrained(tmp_path)
del model
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
model = PeftModel.from_pretrained(model, tmp_path)
assert any(msg in str(w.message) for w in recwarn.list)
assert any(missing_key in str(w.message) for w in recwarn.list)
class TestNamingConflictWarning:
"""
Tests for warnings related to naming conflicts between adapter names and tuner prefixes. References: Issue 2252
"""
@pytest.fixture(autouse=True)
def setup(self):
self.peft_config = LoraConfig()
self.prefix = PEFT_TYPE_TO_PREFIX_MAPPING[self.peft_config.peft_type]
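# for LoRA, this prefix is "lora_"; adapter names contained in the prefix (e.g. "lora") would
# clash with the parameter naming scheme and should therefore trigger a warning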
self.base_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
def _save_and_reload_model(self, model, adapter_name, tmp_path):
# Helper method to save and reload the PEFT model
model.save_pretrained(tmp_path, selected_adapters=[adapter_name])
del model
reloaded_base_model = AutoModelForCausalLM.from_pretrained(tmp_path / adapter_name)
return PeftModel.from_pretrained(reloaded_base_model, tmp_path / adapter_name)
def test_no_warning_without_naming_conflict_get_peft_model(self, recwarn):
# No warning should be raised when there is no naming conflict during get_peft_model.
non_conflict_adapter = "adapter"
_ = get_peft_model(self.base_model, self.peft_config, adapter_name=non_conflict_adapter)
expected_msg = f"Adapter name {non_conflict_adapter} should not be contained in the prefix {self.prefix}."
assert not any(expected_msg in str(w.message) for w in recwarn.list)
def test_no_warning_without_naming_conflict_add_adapter(self, recwarn):
# No warning should be raised when adding an adapter without naming conflict.
non_conflict_adapter = "adapter"
other_non_conflict_adapter = "other_adapter"
model = get_peft_model(self.base_model, self.peft_config, adapter_name=non_conflict_adapter)
_ = model.add_adapter(other_non_conflict_adapter, self.peft_config)
expected_msg = (
f"Adapter name {other_non_conflict_adapter} should not be contained in the prefix {self.prefix}."
)
assert not any(expected_msg in str(w.message) for w in recwarn.list)
def test_no_warning_without_naming_conflict_save_and_load(self, recwarn, tmp_path):
# No warning should be raised when saving and loading the model without naming conflict.
non_conflict_adapter = "adapter"
model = get_peft_model(self.base_model, self.peft_config, adapter_name=non_conflict_adapter)
_ = self._save_and_reload_model(model, non_conflict_adapter, tmp_path)
expected_msg = f"Adapter name {non_conflict_adapter} should not be contained in the prefix {self.prefix}."
assert not any(expected_msg in str(w.message) for w in recwarn.list)
def test_warning_naming_conflict_get_peft_model(self, recwarn):
# Warning is raised when the adapter name conflicts with the prefix in get_peft_model.
conflicting_adapter_name = self.prefix[:-1]
_ = get_peft_model(self.base_model, self.peft_config, adapter_name=conflicting_adapter_name)
expected_msg = f"Adapter name {conflicting_adapter_name} should not be contained in the prefix {self.prefix}."
assert any(expected_msg in str(w.message) for w in recwarn.list)
def test_warning_naming_conflict_add_adapter(self, recwarn):
# Warning is raised when adding an adapter with a name that conflicts with the prefix.
conflicting_adapter = self.prefix[1:]
non_conflict_adapter = "adapter"
model = get_peft_model(self.base_model, self.peft_config, adapter_name=non_conflict_adapter)
_ = model.add_adapter(conflicting_adapter, self.peft_config)
expected_msg = f"Adapter name {conflicting_adapter} should not be contained in the prefix {self.prefix}."
assert any(expected_msg in str(w.message) for w in recwarn.list)
def test_warning_naming_conflict_save_and_load(self, recwarn, tmp_path):
# Warning is raised when saving and loading the model with a naming conflict.
conflicting_adapter = self.prefix[:-1]
model = get_peft_model(self.base_model, self.peft_config, adapter_name=conflicting_adapter)
_ = self._save_and_reload_model(model, conflicting_adapter, tmp_path)
expected_msg = f"Adapter name {conflicting_adapter} should not be contained in the prefix {self.prefix}."
assert any(expected_msg in str(w.message) for w in recwarn.list)
class TestCordaInitialization:
"""Test class to check the initialization of CorDA adapters."""
torch_device = infer_device()
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# choose a large weight matrix so that averages are close to the expected values
self.linear = nn.Linear(1000, 1000)
def forward(self, x):
return self.linear(x)
return MyModule().eval().to(self.torch_device)
@pytest.fixture
def data(self):
# larger data is required to pass the KPM test
torch.manual_seed(233)
return torch.rand(1000, 1000).to(self.torch_device)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_no_redundant_fields(self, data, corda_method):
original_model = self.get_model()
model = deepcopy(original_model)
corda_config = CordaConfig(
corda_method=corda_method,
)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=corda_config,
)
preprocess_corda(
model,
config,
run_model=lambda: model(data),
hooked_model=model,
)
peft_model = get_peft_model(model, config)
# check if the redundant fields are removed
assert not hasattr(peft_model.base_model.linear, "sample_count")
assert not hasattr(peft_model.base_model.linear, "covariance_matrix")
assert not hasattr(peft_model.base_model.linear, "corda_method")
assert not hasattr(peft_model.base_model.linear, "rank")
assert not hasattr(peft_model.base_model.linear, "eigens")
# legacy debug fields
assert not hasattr(peft_model.base_model.linear, "mean")
assert not hasattr(peft_model.base_model.linear, "std")
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_sample_count(self, data, corda_method):
original_model = self.get_model()
model = deepcopy(original_model)
corda_config = CordaConfig(
corda_method=corda_method,
prune_temporary_fields=False,
)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=corda_config,
)
preprocess_corda(
model,
config,
run_model=lambda: [model(data), model(data)], # running model twice to test `sample_count`
hooked_model=model,
)
# covariance of linear should be data.T @ data
layer = model.linear
assert hasattr(layer, "covariance_matrix")
assert torch.allclose(layer.covariance_matrix, data.T @ data, atol=1e-06)
# sample count of linear should be 2
assert hasattr(layer, "sample_count")
assert layer.sample_count == 2
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_hook_unregister(self, data, corda_method):
original_model = self.get_model()
model = deepcopy(original_model)
hook_call_count = 0
def hook(*args):
nonlocal hook_call_count
hook_call_count += 1
model.linear.register_forward_hook(hook)
corda_config = CordaConfig(
corda_method=corda_method,
prune_temporary_fields=False,
)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=corda_config,
)
preprocess_corda(
model,
config,
run_model=lambda: model(data),
hooked_model=model,
)
# after preprocessing, external and internal hook should be run once
assert hook_call_count == 1
assert model.linear.sample_count == 1
# run preprocessed model once
model(data)[0]
# the external hook should be kept, but the internal hook should be gone
assert hook_call_count == 2
assert model.linear.sample_count == 1
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_linear_init_default(self, data, tmp_path, corda_method):
original_model = self.get_model()
model = deepcopy(original_model)
output_base = model(data)[0]
corda_config = CordaConfig(
cache_file=tmp_path / "corda_cache.pt",
covariance_file=tmp_path / "covariance_cache.pt",
corda_method=corda_method,
)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=corda_config,
)
preprocess_corda(
model,
config,
run_model=lambda: model(data),
hooked_model=model,
)
peft_model = get_peft_model(model, config)
# check that the adapter performs an identity transformation
assert torch.allclose(output_base, peft_model(data)[0], atol=1e-06)
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_corda = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_corda, atol=tol, rtol=tol)
# if load SVD result from cache, the output should be the same
model = deepcopy(original_model)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=CordaConfig(cache_file=tmp_path / "corda_cache.pt", corda_method=corda_method),
)
preprocess_corda(model, config)
peft_model = get_peft_model(model, config)
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
assert torch.allclose(output_corda, peft_model(data)[0], atol=1e-06)
# if load covariance from cache, the output should be the same
model = deepcopy(original_model)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=CordaConfig(covariance_file=tmp_path / "covariance_cache.pt", corda_method=corda_method),
)
preprocess_corda(model, config)
peft_model = get_peft_model(model, config)
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
assert torch.allclose(output_corda, peft_model(data)[0], atol=1e-06)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_hooked_model_linear_init_default(self, data, tmp_path, corda_method):
original_model = self.get_model()
model = deepcopy(original_model)
hooked_model = deepcopy(model)
output_base = model(data)[0]
corda_config = CordaConfig(
cache_file=tmp_path / "corda_cache.pt",
covariance_file=tmp_path / "covariance_cache.pt",
corda_method=corda_method,
)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=corda_config,
)
# difference from the above test: this test uses a copied model as hooked model
preprocess_corda(
model,
config,
run_model=lambda: hooked_model(data),
hooked_model=hooked_model,
)
peft_model = get_peft_model(model, config)
# check that the adapter performs an identity transformation
assert torch.allclose(output_base, peft_model(data)[0], atol=1e-06)
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_corda = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_corda, atol=tol, rtol=tol)
# if load SVD result from cache, the output should be the same
model = deepcopy(original_model)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=CordaConfig(cache_file=tmp_path / "corda_cache.pt", corda_method=corda_method),
)
preprocess_corda(model, config)
peft_model = get_peft_model(model, config)
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
assert torch.allclose(output_corda, peft_model(data)[0], atol=1e-06)
# if load covariance from cache, the output should be the same
model = deepcopy(original_model)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
corda_config=CordaConfig(covariance_file=tmp_path / "covariance_cache.pt", corda_method=corda_method),
)
preprocess_corda(model, config)
peft_model = get_peft_model(model, config)
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
assert torch.allclose(output_corda, peft_model(data)[0], atol=1e-06)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_linear_init_default_with_rank_pattern(self, data, tmp_path, corda_method):
original_model = self.get_model()
model = deepcopy(original_model)
output_base = model(data)[0]
corda_config = CordaConfig(
cache_file=tmp_path / "corda_cache.pt",
covariance_file=tmp_path / "covariance_cache.pt",
corda_method=corda_method,
)
config = LoraConfig(
rank_pattern={"linear": 8, "embed": 16, "conv2d": 32},
init_lora_weights="corda",
target_modules=["linear"],
corda_config=corda_config,
)
preprocess_corda(
model,
config,
run_model=lambda: model(data),
)
peft_model = get_peft_model(model, config)
# check that the adapter performs an identity transformation
assert torch.allclose(output_base, peft_model(data)[0], atol=1e-06)
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_corda = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_corda, atol=tol, rtol=tol)
# if load SVD result from cache, the output should be the same
model = deepcopy(original_model)
config = LoraConfig(
rank_pattern={"linear": 8, "embed": 16, "conv2d": 32},
init_lora_weights="corda",
target_modules=["linear"],
corda_config=CordaConfig(cache_file=tmp_path / "corda_cache.pt", corda_method=corda_method),
)
preprocess_corda(model, config)
peft_model = get_peft_model(model, config)
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
assert torch.allclose(output_corda, peft_model(data)[0], atol=1e-06)
# if load covariance from cache, the output should be the same
model = deepcopy(original_model)
config = LoraConfig(
rank_pattern={"linear": 8, "embed": 16, "conv2d": 32},
init_lora_weights="corda",
target_modules=["linear"],
corda_config=CordaConfig(covariance_file=tmp_path / "covariance_cache.pt", corda_method=corda_method),
)
preprocess_corda(model, config)
peft_model = get_peft_model(model, config)
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
assert torch.allclose(output_corda, peft_model(data)[0], atol=1e-06)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_conversion_same_output_after_loading(self, data, tmp_path, corda_method):
model = self.get_model()
output_base = model(data)[0]
corda_config = CordaConfig(corda_method=corda_method)
config = LoraConfig(init_lora_weights="corda", target_modules=["linear"], r=8, corda_config=corda_config)
preprocess_corda(model, config, run_model=lambda: model(data), hooked_model=model)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "corda"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_corda = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_corda, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "corda-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_corda, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_config_keys_before = list(peft_model.peft_config.keys())
peft_config_dict_before = peft_model.peft_config["default"].to_dict()
peft_model.save_pretrained(
tmp_path / "corda-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
peft_config_keys_after = list(peft_model.peft_config.keys())
peft_config_dict_after = peft_model.peft_config["default"].to_dict()
assert peft_config_keys_before == peft_config_keys_after
assert peft_config_dict_before == peft_config_dict_after
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_corda, output_converted, atol=tol, rtol=tol)
# the rank should be double what it was initially
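# (with path_initial_model_for_weight_conversion, the difference to the initial adapter is stored by
# concatenating the initial and the trained low-rank factors, which doubles the stored rank)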
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_conversion_same_output_after_loading_with_rank_pattern(self, data, tmp_path, corda_method):
# same as above, but using rank_pattern
model = self.get_model()
output_base = model(data)[0]
# use rank_pattern here; note that since there is only a single linear layer, r is completely overridden
corda_config = CordaConfig(corda_method=corda_method)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
r=8,
rank_pattern={"linear": 32},
corda_config=corda_config,
)
preprocess_corda(model, config, run_model=lambda: model(data), hooked_model=model)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "corda"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_corda = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_corda, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "corda-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_corda, output_loaded, atol=tol, rtol=tol)
# sanity check: the config rank should still be 8 and the layer rank 32 (from rank_pattern), as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 32
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "corda-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_corda, output_converted, atol=tol, rtol=tol)
# both the config rank and the rank_pattern rank should be double what they were initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 64
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_conversion_same_output_after_loading_with_alpha_pattern(self, data, tmp_path, corda_method):
# same as above, but using alpha_pattern
model = self.get_model()
output_base = model(data)[0]
# use alpha_pattern here; note that since there is only a single linear layer, lora_alpha is completely
# overridden
corda_config = CordaConfig(corda_method=corda_method)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
alpha_pattern={"linear": 5},
corda_config=corda_config,
)
preprocess_corda(model, config, run_model=lambda: model(data), hooked_model=model)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "corda"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_corda = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_corda, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "corda-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_corda, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
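# scaling is lora_alpha / r; alpha_pattern sets lora_alpha to 5 for this layer, hence 5 / 8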
assert model_loaded.base_model.model.linear.scaling["default"] == 5 / 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "corda-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_corda, output_converted, atol=tol, rtol=tol)
# the rank should be double what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
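# the conversion doubles both lora_alpha and r, so the effective scaling is unchanged (10 / 16 == 5 / 8)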
assert model_converted.base_model.model.linear.scaling["default"] == 10 / 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_conversion_same_output_after_loading_with_rslora(self, data, tmp_path, corda_method):
model = self.get_model()
output_base = model(data)[0]
corda_config = CordaConfig(corda_method=corda_method)
config = LoraConfig(
init_lora_weights="corda", target_modules=["linear"], r=8, use_rslora=True, corda_config=corda_config
)
preprocess_corda(model, config, run_model=lambda: model(data), hooked_model=model)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "corda"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_corda = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_corda, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "corda-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_corda, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
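# with use_rslora, scaling is lora_alpha / sqrt(r); the default lora_alpha of 8 and r=8 give 8 / sqrt(8)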
assert model_loaded.base_model.model.linear.scaling["default"] == 8 / (8**0.5)
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "corda-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "corda-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_corda, output_converted, atol=tol, rtol=tol)
# the rank should be double what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# same scale as before with a little bit of floating point imprecision
assert model_converted.base_model.model.linear.scaling["default"] == pytest.approx(8 / (8**0.5))
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_rank_pattern_and_rslora_raises(self, data, tmp_path, corda_method):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
corda_config = CordaConfig(corda_method=corda_method)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
r=8,
rank_pattern={"linear": 2},
use_rslora=True,
corda_config=corda_config,
)
preprocess_corda(model, config, run_model=lambda: model(data), hooked_model=model)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "corda-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
@pytest.mark.parametrize("corda_method", ("ipm", "kpm"))
def test_lora_corda_alpha_pattern_and_rslora_raises(self, data, tmp_path, corda_method):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
corda_config = CordaConfig(corda_method=corda_method)
config = LoraConfig(
init_lora_weights="corda",
target_modules=["linear"],
r=8,
alpha_pattern={"linear": 2},
use_rslora=True,
corda_config=corda_config,
)
preprocess_corda(model, config, run_model=lambda: model(data), hooked_model=model)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "corda-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
class TestEvaInitialization:
"""Tests for the EVA (Explained Variance Adaptation) initialization method.
This test suite verifies:
1. Consistency of initialization across different seeds
2. Proper error handling for invalid inputs
3. Compatibility with different model architectures
4. Reproducibility of results
5. Proper handling of edge cases
"""
# Constants for test configuration
COSINE_SIMILARITY_THRESHOLD = 0.75
NUM_SEEDS = 2
BATCH_SIZE = 4
MAX_LENGTH = 256
LORA_DIM = 8
LORA_ALPHA = 1
DEVICE = infer_device()
@pytest.fixture
def tokenizer(self):
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
tokenizer.pad_token = tokenizer.eos_token
return tokenizer
@pytest.fixture
def dataset(self, tokenizer):
dataset = load_dataset("ybelkada/english_quotes_copy", split="train")
# concatenate examples
examples = []
example = ""
for data in dataset:
if len(example) >= self.MAX_LENGTH:
examples.append(example)
example = ""
example = example + " " + data["quote"]
dataset = Dataset.from_dict({"text": examples})
# tokenize
dataset = dataset.map(
lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=self.MAX_LENGTH),
batched=True,
remove_columns=dataset.column_names,
)
dataset.set_format(type="torch")
return dataset
@pytest.fixture
def model(self):
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
model.transformer.h = model.transformer.h[:2] # truncate to 2 layers
return model.to(self.DEVICE)
@pytest.fixture
def peft_config(self):
return LoraConfig(
r=self.LORA_DIM,
lora_alpha=self.LORA_ALPHA,
target_modules=["c_attn"],
init_lora_weights="eva",
eva_config=EvaConfig(rho=2),
)
@staticmethod
def collate_fn(examples):
return {k: torch.stack([v[k] for v in examples], dim=0) for k in examples[0].keys()}
@staticmethod
def prepare_layer_inputs_fn(layer_input, model_input, layer_name):
return layer_input[0].view(-1, layer_input[0].size(-1))
def get_dataloader(self, dataset):
return DataLoader(
dataset,
batch_size=self.BATCH_SIZE,
collate_fn=self.collate_fn,
shuffle=False,
)
@pytest.mark.parametrize(
"prepare_layer_inputs_keys, expected_outcome",
[
(None, "success"),
(["transformer.h.0.attn.c_attn"], "success"),
(
["transformer.h.0.attn.c_attn", "transformer.h.1.attn.c_attn", "transformer.h.2.attn.c_attn"],
"value_error",
),
],
)
def test_eva_state_dict_prepare_inputs_mapping(
self, model, dataset, peft_config, prepare_layer_inputs_keys, expected_outcome
):
"""
Tests for cases where prepare_layer_inputs_fn is a mapping. Checks that if not all target modules are present,
the prepare_layer_inputs_fn for the remaining modules is set to None. Also checks that if more keys than target
modules are present, a ValueError is raised.
"""
def fn(x, *args):
return x[0].view(-1, x[0].size(-1))
if prepare_layer_inputs_keys is None:
prepare_layer_inputs_fn = fn
else:
prepare_layer_inputs_fn = {k: fn for k in prepare_layer_inputs_keys}
shuffled_dataset = dataset.shuffle(seed=0)
dataloader = self.get_dataloader(shuffled_dataset)
modified_peft_config = deepcopy(peft_config)
modified_peft_config.eva_config.tau = 0 # converge immediately
if expected_outcome == "success":
sd = get_eva_state_dict(
model,
dataloader,
modified_peft_config,
prepare_model_inputs_fn=None,
prepare_layer_inputs_fn=prepare_layer_inputs_fn,
)
assert len(sd) == 2
assert "transformer.h.0.attn.c_attn" in sd
assert "transformer.h.1.attn.c_attn" in sd
else:
with pytest.raises(
ValueError, match="prepare_layer_inputs_fn is a mapping but the following module names were not found"
):
get_eva_state_dict(
model,
dataloader,
modified_peft_config,
prepare_model_inputs_fn=None,
prepare_layer_inputs_fn=prepare_layer_inputs_fn,
)
@pytest.mark.parametrize(
"eva_config",
[EvaConfig(rho=2, adjust_scaling_factors=True)],
)
def test_eva_state_dict_adjust_scaling_factors(self, model, dataset, peft_config, eva_config):
"""
Tests that the scaling factors are adjusted so that all LoRA gradients have the same scale regardless of their
rank.
"""
modified_peft_config = deepcopy(peft_config)
modified_peft_config.eva_config = eva_config
dataloader = self.get_dataloader(dataset)
peft_model = get_peft_model(deepcopy(model), modified_peft_config)
scaling_factors_before = {}
for n, m in peft_model.named_modules():
if isinstance(m, LoraLayer):
scaling_factors_before[n] = m.scaling["default"]
initialize_lora_eva_weights(peft_model, dataloader)
for n, m in peft_model.named_modules():
if isinstance(m, LoraLayer):
assert m.scaling["default"] == scaling_factors_before[n]
@pytest.mark.parametrize(
"eva_config",
[
# note: lower tau to decrease number of iterations until convergence, as tests are slow on CPU
EvaConfig(rho=2, tau=0.9),
EvaConfig(rho=1, tau=0.9),
EvaConfig(rho=1, whiten=True, tau=0.9),
EvaConfig(rho=1.0001, tau=0.9),
],
)
def test_eva_initialization_consistency(self, model, dataset, peft_config, eva_config):
"""
Tests that the state dict returned by `get_eva_state_dict` is consistent across different seeds based on the
cosine similarity of the svd components.
"""
modified_peft_config = deepcopy(peft_config)
modified_peft_config.eva_config = eva_config
state_dicts = []
for seed in range(self.NUM_SEEDS):
shuffled_dataset = dataset.shuffle(seed=seed)
dataloader = self.get_dataloader(shuffled_dataset)
sd = get_eva_state_dict(model, dataloader, modified_peft_config, show_progress_bar=False)
state_dicts.append(sd)
cos_sims = defaultdict(list)
for i, j in itertools.combinations(range(self.NUM_SEEDS), 2):
for k, v1 in state_dicts[i].items():
v2 = state_dicts[j][k]
min_size = min(v1.size(0), v2.size(0))
cos_sims[k].extend(torch.cosine_similarity(v1[:min_size].abs(), v2[:min_size].abs(), dim=1).tolist())
mean_cosine_similarities = {k: torch.tensor(v).mean() for k, v in cos_sims.items()}
for layer_name, mean_cosine_similarity in mean_cosine_similarities.items():
assert mean_cosine_similarity > self.COSINE_SIMILARITY_THRESHOLD, (
f"Mean absolute cosine similarity {mean_cosine_similarity:.4f} "
f"is not greater than {self.COSINE_SIMILARITY_THRESHOLD}"
)
@pytest.mark.parametrize("has_rank_zero", [True, False])
def test_load_eva_state_dict(self, model, dataset, peft_config, tmp_path, has_rank_zero):
"""
Tests that the `eva_state_dict` argument in `initialize_lora_eva_weights` can be used to initialize a model
with EVA weights and that the initialized model can be saved and loaded correctly.
"""
dataloader = self.get_dataloader(dataset)
peft_model = get_peft_model(deepcopy(model), peft_config)
sd = get_eva_state_dict(peft_model, dataloader)
if has_rank_zero:
k = "base_model.model.transformer.h.0.attn.c_attn"
sd[k] = sd[k][:0]
initialize_lora_eva_weights(peft_model, eva_state_dict=sd)
if has_rank_zero:
assert not isinstance(peft_model.model.transformer.h[0].attn.c_attn, LoraLayer)
else:
assert isinstance(peft_model.model.transformer.h[0].attn.c_attn, LoraLayer)
peft_model.save_pretrained(tmp_path)
peft_model = PeftModel.from_pretrained(model, tmp_path, torch_device=self.DEVICE, low_cpu_mem_usage=True)
peft_model(**{k: v.to(self.DEVICE) for k, v in next(iter(dataloader)).items()})
def test_missing_eva_inits(self, model, dataset, peft_config):
"""
Tests that a warning is raised when some adapter modules were not initialized with EVA weights.
"""
modified_peft_config = deepcopy(peft_config)
modified_peft_config.target_modules = ["wte"]
dataloader = self.get_dataloader(dataset)
peft_model = get_peft_model(deepcopy(model), modified_peft_config)
with pytest.warns(
UserWarning,
match="the following layers were initialized with init_lora_weights=True because they were not found in the eva state_dict:*",
):
initialize_lora_eva_weights(peft_model, dataloader)
def test_load_eva_model(self, model, dataset, peft_config, tmp_path):
"""
Tests that a model initialized with EVA weights can be loaded correctly.
"""
dataloader = self.get_dataloader(dataset)
peft_model = get_peft_model(deepcopy(model), peft_config)
initialize_lora_eva_weights(peft_model, dataloader)
peft_model.save_pretrained(tmp_path)
peft_model = PeftModel.from_pretrained(model, tmp_path, torch_device=self.DEVICE, low_cpu_mem_usage=True)
peft_model(**{k: v.to(self.DEVICE) for k, v in next(iter(dataloader)).items()})
def test_eva_initialization_with_invalid_dataloader(self, model, peft_config):
"""Test that appropriate error is raised when dataloader is empty."""
empty_dataset = Dataset.from_dict({"text": []})
dataloader = self.get_dataloader(empty_dataset)
with pytest.raises(ValueError, match="dataloader is empty"):
get_eva_state_dict(model, dataloader, peft_config)
def test_eva_config_rho(self):
"""
Tests that EvaConfig.__init__ raises a ValueError when rho is negative.
"""
with pytest.raises(ValueError, match="`rho` must be >= 1.0"):
EvaConfig(rho=-1)
def test_eva_config_tau(self):
"""
Tests that EvaConfig.__init__ raises a ValueError when tau is not between 0.0 and 1.0.
"""
with pytest.raises(ValueError, match="`tau` must be between 0.0 and 1.0."):
EvaConfig(tau=-0.1)
with pytest.raises(ValueError, match="`tau` must be between 0.0 and 1.0."):
EvaConfig(tau=1.1)
def test_lora_config_raises_warning_with_eva_init_but_not_eva_config(self):
"""
Tests that LoraConfig.__init__ raises a warning when init_lora_weights='eva' but eva_config is not set.
"""
with pytest.warns(
UserWarning,
match="`init_lora_weights` is 'eva' but `eva_config` is not specified. Using default EVA config.",
):
LoraConfig(init_lora_weights="eva")
def test_lora_config_raises_warning_with_eva_config_but_not_eva_init(self):
"""
Tests that LoraConfig.__init__ raises a warning when init_lora_weights is not 'eva' but eva_config is set.
"""
with pytest.warns(
UserWarning, match="`eva_config` specified but will be ignored when `init_lora_weights` is not 'eva'."
):
LoraConfig(init_lora_weights=True, eva_config=EvaConfig())
@pytest.mark.skipif(
platform.system() != "Linux", reason="Out of the box, torch.compile does not work on Windows or MacOS"
)
class TestHotSwapping:
"""Tests for the hotswapping function"""
torch_device = infer_device()
def compile(self, model, do_compile):
if not do_compile:
return model
return torch.compile(model)
def get_model(self):
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=True)
self.relu = nn.ReLU()
self.lin1 = nn.Linear(20, 5, bias=False)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
return X
torch.manual_seed(0)
return MLP().to(self.torch_device)
def get_model_conv2d(self):
class ConvModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 10, kernel_size=3)
def forward(self, X):
return self.conv(X)
torch.manual_seed(0)
return ConvModel().to(self.torch_device)
# this works with all adapters except prompt learning, but we don't test all of them,
# as that would be unnecessary and slow
@pytest.mark.parametrize(
"config",
[
LoraConfig(init_lora_weights=0, target_modules=["lin0"]),
LoraConfig(init_lora_weights=0, target_modules=["lin0", "lin1"]),
],
)
@pytest.mark.parametrize("do_compile", [False, True])
def test_hotswap_works(self, config, do_compile, tmp_path):
# Load 2 different adapters and check that we can hotswap between them, with the model optionally being
# compiled.
atol, rtol = 1e-4, 1e-4
inputs = torch.rand(3, 10).to(self.torch_device)
# create adapter 0
model = self.get_model()
torch.manual_seed(0)
model = get_peft_model(model, config)
model = self.compile(model, do_compile=do_compile)
model.eval()
with torch.inference_mode():
output0 = model(inputs)
model.save_pretrained(tmp_path / "adapter0")
del model
# create adapter 1
model = self.get_model()
torch.manual_seed(1)
model = get_peft_model(model, config)
model = self.compile(model, do_compile=do_compile)
model.eval()
with torch.inference_mode():
output1 = model(inputs)
model.save_pretrained(tmp_path / "adapter1")
# sanity check: they're not the same
assert not torch.allclose(output0, output1, atol=atol, rtol=rtol)
del model
# load adapter 0
model = self.get_model()
model = PeftModel.from_pretrained(model, tmp_path / "adapter0")
model = self.compile(model, do_compile=do_compile)
with torch.inference_mode():
output_loaded0 = model(inputs)
# sanity check: same output after loading for adapter 0
assert torch.allclose(output0, output_loaded0, atol=atol, rtol=rtol)
# hotswap with adapter 1
hotswap_adapter(model, tmp_path / "adapter1", adapter_name="default")
with torch.inference_mode():
output_loaded1 = model(inputs)
# real check: model now behaves like adapter 1
assert torch.allclose(output1, output_loaded1, atol=atol, rtol=rtol)
# hotswap back to adapter 0
hotswap_adapter(model, tmp_path / "adapter0", adapter_name="default")
with torch.inference_mode():
output_loaded_back0 = model(inputs)
# real check: model now behaves again like adapter 0
assert torch.allclose(output0, output_loaded_back0, atol=atol, rtol=rtol)
def test_hotswap_different_peft_types_raises(self, tmp_path):
# When the configs of the two adapters are different PEFT methods, raise
config0 = LoraConfig(target_modules=["lin0"])
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=[])
model = self.get_model()
model = get_peft_model(model, config0)
model.save_pretrained(tmp_path / "adapter0")
del model
model = self.get_model()
model = get_peft_model(model, config1)
model.save_pretrained(tmp_path / "adapter1")
del model
# load adapter 0
model = self.get_model()
model = PeftModel.from_pretrained(model, tmp_path / "adapter0")
msg = r"Incompatible PEFT types found: LORA and IA3"
with pytest.raises(ValueError, match=msg):
hotswap_adapter(model, tmp_path / "adapter1", adapter_name="default")
def test_hotswap_wrong_peft_types_raises(self, tmp_path):
# Only LoRA is supported at the moment
config0 = IA3Config(target_modules=["lin0"], feedforward_modules=[])
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=[])
model = self.get_model()
model = get_peft_model(model, config0)
model.save_pretrained(tmp_path / "adapter0")
del model
model = self.get_model()
model = get_peft_model(model, config1)
model.save_pretrained(tmp_path / "adapter1")
del model
# load adapter 0
model = self.get_model()
model = PeftModel.from_pretrained(model, tmp_path / "adapter0")
msg = r"Hotswapping only supports LORA but IA3 was passed"
with pytest.raises(ValueError, match=msg):
hotswap_adapter(model, tmp_path / "adapter1", adapter_name="default")
def test_hotswap_missing_key_works(self, tmp_path):
# When a key is missing, it is fine, the extra weight is zeroed out
config = LoraConfig(target_modules=["lin0", "lin1"])
model = self.get_model()
model = get_peft_model(model, config)
model.save_pretrained(tmp_path / "adapter0")
del model
model = self.get_model()
model = get_peft_model(model, config)
# remove one key from the state_dict
key = "base_model.model.lin1.lora_A.default.weight"
state_dict = model.state_dict()
del state_dict[key]
model.state_dict = lambda: state_dict
model.save_pretrained(tmp_path / "adapter1")
del model
# load adapter 0
model = self.get_model()
model = PeftModel.from_pretrained(model, tmp_path / "adapter0")
# sanity check: the missing weight is not already all zeros
assert not (model.base_model.model.lin1.lora_A["default"].weight == 0).all()
hotswap_adapter(model, tmp_path / "adapter1", adapter_name="default")
# after hotswapping, it is zeroed out
assert (model.base_model.model.lin1.lora_A["default"].weight == 0).all()
def test_hotswap_extra_key_raises(self, tmp_path):
# When there is an extra key, raise
config = LoraConfig(target_modules=["lin0"])
model = self.get_model()
model = get_peft_model(model, config)
model.save_pretrained(tmp_path / "adapter0")
del model
model = self.get_model()
model = get_peft_model(model, config)
# add an unexpected key
state_dict = model.state_dict()
new_key = "base_model.model.lin1.lora_A.default.weight"
state_dict[new_key] = torch.zeros(8, 20)
model.state_dict = lambda: state_dict
model.save_pretrained(tmp_path / "adapter1")
del model
# load adapter 0
model = self.get_model()
model = PeftModel.from_pretrained(model, tmp_path / "adapter0")
msg = f"Hot swapping the adapter did not succeed, unexpected keys found: {new_key}"
with pytest.raises(RuntimeError, match=msg):
hotswap_adapter(model, tmp_path / "adapter1", adapter_name="default")
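# The tests below cover prepare_model_for_compiled_hotswap, which converts the float LoRA
# scalings to tensors and optionally pads all LoRA ranks to a common target rank, so that
# hotswapping adapters with different ranks or alphas does not trigger recompilation.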
def test_prepare_model_for_compiled_hotswap_scalings_are_tensors(self):
config = LoraConfig(target_modules=["lin0", "lin1"])
model = self.get_model()
model = get_peft_model(model, config)
# sanity check: all scalings are floats
scalings_before = {}
for name, module in model.named_modules():
if hasattr(module, "scaling"):
for key, val in module.scaling.items():
assert isinstance(val, float)
scalings_before[f"{name}.{key}"] = val
prepare_model_for_compiled_hotswap(model)
scalings_after = {}
for name, module in model.named_modules():
if hasattr(module, "scaling"):
for key, val in module.scaling.items():
assert isinstance(val, torch.Tensor)
scalings_after[f"{name}.{key}"] = val.item()
assert scalings_before == scalings_after
def test_prepare_model_for_compiled_hotswap_rank_padding_works(self):
old_rank = 8
config = LoraConfig(target_modules=["lin0", "lin1"], r=old_rank)
model = self.get_model()
model = get_peft_model(model, config)
# sanity check
for name, param in model.named_parameters():
if "lora_A" in name:
assert param.shape[0] == old_rank
elif "lora_B" in name:
assert param.shape[1] == old_rank
new_rank = 13
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
for name, param in model.named_parameters():
if "lora_A" in name:
assert param.shape[0] == new_rank
elif "lora_B" in name:
assert param.shape[1] == new_rank
def test_prepare_model_for_compiled_hotswap_same_rank_padding_works(self):
# same as previous test, but ensure there is no error if the rank to pad to is the same
old_rank = 8
config = LoraConfig(target_modules=["lin0", "lin1"], r=old_rank)
model = self.get_model()
model = get_peft_model(model, config)
prepare_model_for_compiled_hotswap(model, target_rank=old_rank)
for name, param in model.named_parameters():
if "lora_A" in name:
assert param.shape[0] == old_rank
elif "lora_B" in name:
assert param.shape[1] == old_rank
def test_prepare_model_for_compiled_hotswap_conv2d_rank_padding_works(self):
# same as previous test, but for a Conv2d model
old_rank = 8
config = LoraConfig(target_modules=["conv"], r=old_rank)
model = self.get_model_conv2d()
model = get_peft_model(model, config)
# sanity check
for name, param in model.named_parameters():
if "lora_A" in name:
assert param.shape[0] == old_rank
elif "lora_B" in name:
assert param.shape[1] == old_rank
new_rank = 13
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
for name, param in model.named_parameters():
if "lora_A" in name:
assert param.shape[0] == new_rank
elif "lora_B" in name:
assert param.shape[1] == new_rank
def test_prepare_model_for_compiled_hotswap_lower_rank_padding_raises(self):
# when trying to pad to a lower rank, raise an error
old_rank0 = 8
old_rank1 = 10
new_rank = 9
config = LoraConfig(target_modules=["lin0", "lin1"], r=old_rank0, rank_pattern={"lin1": old_rank1})
model = self.get_model()
model = get_peft_model(model, config)
msg = re.escape("Trying to pad the adapter to the target rank 9, but the original rank is larger (10)")
with pytest.raises(ValueError, match=msg):
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
def test_prepare_model_for_compiled_hotswap_with_rank_pattern(self):
old_rank0 = 8
old_rank1 = 9
config = LoraConfig(target_modules=["lin0", "lin1"], r=old_rank0, rank_pattern={"lin1": old_rank1})
model = self.get_model()
model = get_peft_model(model, config)
# sanity check
for name, param in model.named_parameters():
if "lora_A" in name:
if "lin0" in name:
assert param.shape[0] == old_rank0
else:
assert param.shape[0] == old_rank1
elif "lora_B" in name:
if "lin0" in name:
assert param.shape[1] == old_rank0
else:
assert param.shape[1] == old_rank1
new_rank = 13
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
for name, param in model.named_parameters():
if "lora_A" in name:
assert param.shape[0] == new_rank
elif "lora_B" in name:
assert param.shape[1] == new_rank
def test_prepare_model_for_compiled_hotswap_model_already_compiled_raises(self):
config = LoraConfig(target_modules=["lin0"])
model = self.get_model()
model = get_peft_model(model, config)
model = torch.compile(model, mode="reduce-overhead")
msg = re.escape("Call prepare_model_for_compiled_hotswap *before* compiling the model")
with pytest.raises(ValueError, match=msg):
prepare_model_for_compiled_hotswap(model)
def test_prepare_model_for_compiled_hotswap_does_not_change_output(self):
# preparing the model for hotswapping should not change the model output
inputs = torch.rand(3, 10).to(self.torch_device)
model = self.get_model().eval()
with torch.inference_mode():
output_base = model(inputs)
old_rank = 8
config = LoraConfig(target_modules=["lin0", "lin1"], r=old_rank, init_lora_weights=False)
model = get_peft_model(model, config).eval()
with torch.inference_mode():
output_before = model(inputs)
# sanity check: LoRA changed output
assert not torch.allclose(output_base, output_before)
new_rank = 13
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
with torch.inference_mode():
output_after = model(inputs)
assert torch.allclose(output_before, output_after)
def test_prepare_model_for_compiled_hotswap_does_not_change_output_conv2d(self):
# preparing the model for hotswapping should not change the model output
inputs = torch.rand(3, 3, 10, 10).to(self.torch_device)
model = self.get_model_conv2d().eval()
with torch.inference_mode():
output_base = model(inputs)
old_rank = 8
config = LoraConfig(target_modules=["conv"], r=old_rank, init_lora_weights=False)
model = get_peft_model(model, config).eval()
with torch.inference_mode():
output_before = model(inputs)
# sanity check: LoRA changed output
assert not torch.allclose(output_base, output_before)
new_rank = 13
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
with torch.inference_mode():
output_after = model(inputs)
assert torch.allclose(output_before, output_after)
def test_prepare_model_for_compiled_hotswap_scalings_update_config(self):
old_rank0 = 11
old_rank1 = 13
config = LoraConfig(target_modules=["lin0", "lin1"], r=old_rank0, rank_pattern={"lin1": old_rank1})
model = self.get_model()
model = get_peft_model(model, config)
new_rank = 15
prepare_model_for_compiled_hotswap(model, target_rank=new_rank, config=model.peft_config)
assert model.peft_config["default"].r == new_rank
assert model.peft_config["default"].rank_pattern == {"lin1": new_rank}
def test_prepare_model_for_compiled_hotswap_lora_bias(self):
# When setting lora_bias=True in the LoraConfig, the LoRA B parameter will have a bias term. Check that padding
# still works correctly. Note that the LoRA A parameter still won't have a bias term.
old_rank = 8
config = LoraConfig(target_modules=["lin0", "lin1"], r=old_rank, lora_bias=True)
model = self.get_model()
model = get_peft_model(model, config)
# sanity check
for name, param in model.named_parameters():
if "lora_A" in name and name.endswith(".weight"):
assert param.shape[0] == old_rank
elif "lora_B" in name and name.endswith(".weight"):
assert param.shape[1] == old_rank
elif "lora_A" in name and name.endswith(".bias"):
assert False, "LoRA A should not have a bias term"
elif "lora_B" in name and name.endswith(".bias"):
assert param.shape[0] in (5, 20) # output shapes of the 2 layers
new_rank = 13
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
for name, param in model.named_parameters():
if "lora_A" in name and name.endswith(".weight"):
assert param.shape[0] == new_rank
elif "lora_B" in name and name.endswith(".weight"):
assert param.shape[1] == new_rank
elif "lora_A" in name and name.endswith(".bias"):
assert False, "LoRA A should not have a bias term"
elif "lora_B" in name and name.endswith(".bias"):
assert param.shape[0] in (5, 20) # output shapes of the 2 layers
def test_prepare_model_for_compiled_hotswap_conv2d_lora_bias(self):
# same as previous test, but for a Conv2d model
old_rank = 8
config = LoraConfig(target_modules=["conv"], r=old_rank, lora_bias=True)
model = self.get_model_conv2d()
model = get_peft_model(model, config)
# sanity check
for name, param in model.named_parameters():
if "lora_A" in name and name.endswith(".weight"):
assert param.shape[0] == old_rank
elif "lora_B" in name and name.endswith(".weight"):
assert param.shape[1] == old_rank
elif "lora_A" in name and name.endswith(".bias"):
assert False, "LoRA A should not have a bias term"
elif "lora_B" in name and name.endswith(".bias"):
assert param.shape[0] == 10 # output shape of conv layer
new_rank = 13
prepare_model_for_compiled_hotswap(model, target_rank=new_rank)
for name, param in model.named_parameters():
if "lora_A" in name and name.endswith(".weight"):
assert param.shape[0] == new_rank
elif "lora_B" in name and name.endswith(".weight"):
assert param.shape[1] == new_rank
elif "lora_A" in name and name.endswith(".bias"):
assert False, "LoRA A should not have a bias term"
elif "lora_B" in name and name.endswith(".bias"):
assert param.shape[0] == 10 # output shape of conv layer
def test_import_peft_type_to_model_mapping_deprecation_warning(recwarn):
# This is for backwards compatibility: In #2282, PEFT_TYPE_TO_MODEL_MAPPING was removed as it was redundant with
# PEFT_TYPE_TO_TUNER_MAPPING. However, third party code could still use this mapping, e.g.:
# https://github.com/AutoGPTQ/AutoGPTQ/blob/6689349625de973b9ee3016c28c11f32acf7f02c/auto_gptq/utils/peft_utils.py#L8
# TODO: Remove after 2026-01
# first check that there is no warning under normal circumstances
from peft.peft_model import PeftModel # noqa
expected = (
"PEFT_TYPE_TO_MODEL_MAPPING is deprecated, please use `from peft import PEFT_TYPE_TO_TUNER_MAPPING` instead"
)
warnings = (w.message.args[0] for w in recwarn.list)
assert not any(w.startswith(expected) for w in warnings)
from peft.peft_model import PEFT_TYPE_TO_MODEL_MAPPING # noqa
# check that there is a warning with this message after importing the variable
warnings = (w.message.args[0] for w in recwarn.list)
assert any(w.startswith(expected) for w in warnings)
| peft/tests/test_initialization.py/0 | {
"file_path": "peft/tests/test_initialization.py",
"repo_id": "peft",
"token_count": 64371
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import huggingface_hub
import pytest
import torch
from safetensors.torch import load_file
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, PeftType, TaskType, XLoraConfig, get_peft_model
from peft.peft_model import PeftModel
from peft.utils import infer_device
class TestXlora:
torch_device = infer_device()
model_id = "facebook/opt-125m"
num_loras = 4
@pytest.fixture(scope="class")
def lora_dir(self, tmp_path_factory):
return tmp_path_factory.mktemp("lora")
@pytest.fixture(scope="class")
def lora_embedding_dir(self, tmp_path_factory):
return tmp_path_factory.mktemp("lora_embedding")
@pytest.fixture(scope="class")
def saved_lora_adapters(self, lora_dir):
file_names = []
for i in range(1, self.num_loras + 1):
torch.manual_seed(i)
lora_config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False)
model = AutoModelForCausalLM.from_pretrained(self.model_id)
peft_model = get_peft_model(model, lora_config)
file_name = os.path.join(lora_dir, f"checkpoint-{i}")
peft_model.save_pretrained(file_name)
file_names.append(file_name)
return file_names
@pytest.fixture(scope="class")
def saved_lora_embedding_adapters(self, lora_embedding_dir):
file_names = []
for i in range(1, self.num_loras + 1):
torch.manual_seed(i)
lora_config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, target_modules=["embed_tokens"])
model = AutoModelForCausalLM.from_pretrained(self.model_id)
peft_model = get_peft_model(model, lora_config)
file_name = os.path.join(lora_embedding_dir, f"checkpoint-{i}")
peft_model.save_pretrained(file_name)
file_names.append(file_name)
return file_names
@pytest.fixture(scope="class")
def tokenizer(self):
tokenizer = AutoTokenizer.from_pretrained(self.model_id, trust_remote_code=True, device_map=self.torch_device)
return tokenizer
@pytest.fixture(scope="function")
def embedding_model(self, saved_lora_embedding_adapters):
model = AutoModelForCausalLM.from_pretrained(self.model_id)
model.config.use_cache = False
adapters = {str(i): file_name for i, file_name in enumerate(saved_lora_embedding_adapters)}
peft_config = XLoraConfig(
task_type=TaskType.CAUSAL_LM,
peft_type=PeftType.XLORA,
hidden_size=model.config.hidden_size,
xlora_depth=8,
adapters=adapters,
)
model = get_peft_model(model, peft_config).to(self.torch_device)
return model
@pytest.fixture(scope="function")
def model(self, saved_lora_adapters):
model = AutoModelForCausalLM.from_pretrained(self.model_id)
model.config.use_cache = False
adapters = {str(i): file_name for i, file_name in enumerate(saved_lora_adapters)}
peft_config = XLoraConfig(
task_type=TaskType.CAUSAL_LM,
peft_type=PeftType.XLORA,
hidden_size=model.config.hidden_size,
xlora_depth=8,
adapters=adapters,
)
model = get_peft_model(model, peft_config).to(self.torch_device)
return model
@pytest.fixture(scope="function")
def model_layerwise(self, saved_lora_adapters):
model = AutoModelForCausalLM.from_pretrained(self.model_id)
model.config.use_cache = False
adapters = {str(i): file_name for i, file_name in enumerate(saved_lora_adapters)}
peft_config = XLoraConfig(
task_type=TaskType.CAUSAL_LM,
peft_type=PeftType.XLORA,
hidden_size=model.config.hidden_size,
xlora_depth=8,
adapters=adapters,
layerwise_scalings=True,
)
model = get_peft_model(model, peft_config).to(self.torch_device)
return model
def test_functional(self, tokenizer, model):
model.enable_scalings_logging()
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
# TODO: fix the xfailing test
@pytest.mark.xfail
def test_scalings_logging_methods(self, tokenizer, model):
model.enable_scalings_logging()
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
_ = model.get_latest_scalings()
        # 32 is the number of max scalings. 3 is the number of prompt tokens.
assert 32 + 3 >= len(model.get_scalings_log()) > 0
model.disable_scalings_logging()
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
assert 32 >= len(model.get_scalings_log()) > 0
bucketed = model.get_bucketed_scalings_log()
keys = bucketed.keys()
        # One bucket for each token as we aren't using cache
assert len(bucketed) == 32 == len(keys)
seq_len = inputs.shape[1]
for key in keys:
assert len(bucketed[key][0]) == 1
assert len(bucketed[key][1]) == 1
assert bucketed[key][0][0] == key - seq_len
model.clear_scalings_log()
assert len(model.get_scalings_log()) == 0
def test_misc_methods(self, tokenizer, model):
model.set_global_scaling_weight(1.5)
assert model.internal_xlora_classifier.config.global_scaling_weight == 1.5
assert model.get_global_scaling_weight() == 1.5
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
assert str(model) is not None
# TODO: On CI (but not locally), this test seems to have become flaky with the latest transformers changes (v4.45).
@pytest.mark.xfail
def test_save_load_functional(self, tokenizer, model, tmp_path):
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
before_logits = outputs[: inputs.shape[1] :]
assert torch.isfinite(before_logits).all()
model.save_pretrained(save_directory=tmp_path)
del model
model = AutoModelForCausalLM.from_pretrained(self.model_id)
model.config.use_cache = False
model = PeftModel.from_pretrained(model=model, model_id=tmp_path).to(self.torch_device)
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
after_logits = outputs[: inputs.shape[1] :]
assert torch.isfinite(after_logits).all()
assert torch.equal(after_logits, before_logits)
def test_save_load_functional_pt(self, tokenizer, model, tmp_path):
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
before_logits = outputs[: inputs.shape[1] :]
assert torch.isfinite(before_logits).all()
model.save_pretrained(save_directory=tmp_path, safe_serialization=False)
del model
model = AutoModelForCausalLM.from_pretrained(self.model_id)
model.config.use_cache = False
model = PeftModel.from_pretrained(model=model, model_id=tmp_path, safe_serialization=False).to(
self.torch_device
)
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
after_logits = outputs[: inputs.shape[1] :]
assert torch.isfinite(after_logits).all()
assert torch.equal(after_logits, before_logits), (after_logits, before_logits)
def test_topk_lora(self, tokenizer, model):
model.set_topk_lora(2)
assert model.internal_xlora_classifier.config.top_k_lora == 2
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
def test_softmax_topk(self, tokenizer, model):
# Just reach in to set the config
model.internal_xlora_classifier.config.top_k_lora = 2
model.internal_xlora_classifier.config.enable_softmax = False
model.internal_xlora_classifier.config.enable_softmax_topk = True
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
def test_set_override_scaling_pass_value(self, model):
# Defaults to 0
assert model.internal_xlora_classifier.override_scaling_pass_value == 0.0
# Set it to 2 and make sure it actually is
model.set_scaling_pass_value(2)
assert model.internal_xlora_classifier.override_scaling_pass_value == 2
assert model.internal_xlora_classifier.config.scaling_pass_value == 2
        # Set it back to None and make sure it falls back to 1 / num_loras
model.set_scaling_pass_value(None)
assert model.internal_xlora_classifier.override_scaling_pass_value == 1 / self.num_loras
assert model.internal_xlora_classifier.config.scaling_pass_value == 1 / self.num_loras
def test_functional_layerwise(self, tokenizer, model_layerwise):
model_layerwise.enable_scalings_logging()
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = model_layerwise.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
def test_disable_adapter(self, tokenizer, model):
model.enable_scalings_logging()
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
with model.disable_adapter():
outputs_disabled = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
outputs = model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs_disabled[: inputs.shape[1] :]).all()
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
assert not torch.equal(outputs, outputs_disabled)
def test_functional_embedding(self, tokenizer, embedding_model):
inputs = tokenizer.encode("Python is a", add_special_tokens=False, return_tensors="pt")
outputs = embedding_model.generate(
input_ids=inputs.to(self.torch_device),
max_new_tokens=32,
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()
def test_xlora_loading_valid(self):
        # This test also simultaneously tests the loading-from-hub functionality!
torch.manual_seed(123)
model_id = "facebook/opt-125m"
model = AutoModelForCausalLM.from_pretrained(model_id)
model.config.use_cache = False
adapters = [
"peft-internal-testing/opt-125m-dummy-lora",
"peft-internal-testing/opt-125m-dummy-lora",
]
adapters = {str(i): file_name for i, file_name in enumerate(adapters)}
peft_config = XLoraConfig(
task_type=TaskType.CAUSAL_LM,
peft_type=PeftType.XLORA,
hidden_size=model.config.hidden_size,
adapters=adapters,
xlora_depth=8,
xlora_size=2048,
layerwise_scalings=True,
xlora_dropout_p=0.2,
)
model = get_peft_model(model, peft_config)
downloaded = huggingface_hub.hf_hub_download(repo_id=adapters["0"], filename="adapter_model.safetensors")
sd = load_file(downloaded)
w0 = model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A["0"].weight
w1 = sd["base_model.model.model.decoder.layers.0.self_attn.q_proj.lora_A.weight"]
assert torch.allclose(w0, w1)
| peft/tests/test_xlora.py/0 | {
"file_path": "peft/tests/test_xlora.py",
"repo_id": "peft",
"token_count": 6284
} |
#!/usr/bin/env python3
""" Checkpoint Averaging Script
This script averages all model weights for checkpoints in the specified path that match
the specified filter wildcard. All checkpoints must be from the exact same model.
For any hope of decent results, the checkpoints should be from the same or child
(via resumes) training session. This can be viewed as similar to maintaining running
EMA (exponential moving average) of the model weights or performing SWA (stochastic
weight averaging), but post-training.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse
import os
import glob
import hashlib
from timm.models import load_state_dict
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
DEFAULT_OUTPUT = "./averaged.pth"
DEFAULT_SAFE_OUTPUT = "./averaged.safetensors"
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager')
parser.add_argument('--input', default='', type=str, metavar='PATH',
help='path to base input folder containing checkpoints')
parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD',
help='checkpoint filter (path wildcard)')
parser.add_argument('--output', default=DEFAULT_OUTPUT, type=str, metavar='PATH',
help=f'Output filename. Defaults to {DEFAULT_SAFE_OUTPUT} when passing --safetensors.')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
help='Force not using ema version of weights (if present)')
parser.add_argument('--no-sort', dest='no_sort', action='store_true',
help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant')
parser.add_argument('-n', type=int, default=10, metavar='N',
help='Number of checkpoints to average')
parser.add_argument('--safetensors', action='store_true',
help='Save weights using safetensors instead of the default torch way (pickle).')
def checkpoint_metric(checkpoint_path):
if not checkpoint_path or not os.path.isfile(checkpoint_path):
return {}
print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location='cpu')
metric = None
if 'metric' in checkpoint:
metric = checkpoint['metric']
elif 'metrics' in checkpoint and 'metric_name' in checkpoint:
metrics = checkpoint['metrics']
print(metrics)
metric = metrics[checkpoint['metric_name']]
return metric
def main():
args = parser.parse_args()
# by default use the EMA weights (if present)
args.use_ema = not args.no_use_ema
# by default sort by checkpoint metric (if present) and avg top n checkpoints
args.sort = not args.no_sort
if args.safetensors and args.output == DEFAULT_OUTPUT:
# Default path changes if using safetensors
args.output = DEFAULT_SAFE_OUTPUT
output, output_ext = os.path.splitext(args.output)
if not output_ext:
output_ext = ('.safetensors' if args.safetensors else '.pth')
output = output + output_ext
if args.safetensors and not output_ext == ".safetensors":
print(
"Warning: saving weights as safetensors but output file extension is not "
f"set to '.safetensors': {args.output}"
)
if os.path.exists(output):
print("Error: Output filename ({}) already exists.".format(output))
exit(1)
pattern = args.input
if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
pattern += os.path.sep
pattern += args.filter
checkpoints = glob.glob(pattern, recursive=True)
if args.sort:
checkpoint_metrics = []
for c in checkpoints:
metric = checkpoint_metric(c)
if metric is not None:
checkpoint_metrics.append((metric, c))
checkpoint_metrics = list(sorted(checkpoint_metrics))
checkpoint_metrics = checkpoint_metrics[-args.n:]
if checkpoint_metrics:
print("Selected checkpoints:")
[print(m, c) for m, c in checkpoint_metrics]
avg_checkpoints = [c for m, c in checkpoint_metrics]
else:
avg_checkpoints = checkpoints
if avg_checkpoints:
print("Selected checkpoints:")
[print(c) for c in checkpoints]
if not avg_checkpoints:
print('Error: No checkpoints found to average.')
exit(1)
avg_state_dict = {}
avg_counts = {}
for c in avg_checkpoints:
new_state_dict = load_state_dict(c, args.use_ema)
if not new_state_dict:
print(f"Error: Checkpoint ({c}) doesn't exist")
continue
for k, v in new_state_dict.items():
if k not in avg_state_dict:
avg_state_dict[k] = v.clone().to(dtype=torch.float64)
avg_counts[k] = 1
else:
avg_state_dict[k] += v.to(dtype=torch.float64)
avg_counts[k] += 1
for k, v in avg_state_dict.items():
v.div_(avg_counts[k])
# float32 overflow seems unlikely based on weights seen to date, but who knows
float32_info = torch.finfo(torch.float32)
final_state_dict = {}
for k, v in avg_state_dict.items():
v = v.clamp(float32_info.min, float32_info.max)
final_state_dict[k] = v.to(dtype=torch.float32)
if args.safetensors:
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
safetensors.torch.save_file(final_state_dict, output)
else:
torch.save(final_state_dict, output)
with open(output, 'rb') as f:
sha_hash = hashlib.sha256(f.read()).hexdigest()
print(f"=> Saved state_dict to '{output}, SHA256: {sha_hash}'")
if __name__ == '__main__':
main()
| pytorch-image-models/avg_checkpoints.py/0 | {
"file_path": "pytorch-image-models/avg_checkpoints.py",
"repo_id": "pytorch-image-models",
"token_count": 2377
} |
# AdvProp (EfficientNet)
**AdvProp** is an adversarial training scheme which treats adversarial examples as additional examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions to normal examples.
The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
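The core idea can be sketched as a layer that keeps two sets of batch norm statistics, routing clean examples through the main batch norm and adversarial examples through an auxiliary one (at inference time only the main statistics are used). The snippet below is purely illustrative; `DualBatchNorm2d` and its `adversarial` flag are made-up names, not part of `timm`.
```py
>>> import torch.nn as nn
>>> class DualBatchNorm2d(nn.Module):
...     """Illustrative AdvProp-style dual batch norm (not the timm implementation)."""
...     def __init__(self, num_features):
...         super().__init__()
...         self.bn_clean = nn.BatchNorm2d(num_features)  # statistics of clean examples
...         self.bn_adv = nn.BatchNorm2d(num_features)    # auxiliary statistics of adversarial examples
...     def forward(self, x, adversarial=False):
...         return self.bn_adv(x) if adversarial else self.bn_clean(x)
```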
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ap`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{xie2020adversarial,
title={Adversarial Examples Improve Image Recognition},
author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le},
year={2020},
eprint={1911.09665},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: AdvProp
Paper:
Title: Adversarial Examples Improve Image Recognition
URL: https://paperswithcode.com/paper/adversarial-examples-improve-image
Models:
- Name: tf_efficientnet_b0_ap
In Collection: AdvProp
Metadata:
FLOPs: 488688572
Parameters: 5290000
File Size: 21385973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b0_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1334
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.1%
Top 5 Accuracy: 93.26%
- Name: tf_efficientnet_b1_ap
In Collection: AdvProp
Metadata:
FLOPs: 883633200
Parameters: 7790000
File Size: 31515350
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b1_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.882'
Momentum: 0.9
Batch Size: 2048
Image Size: '240'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1344
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.28%
Top 5 Accuracy: 94.3%
- Name: tf_efficientnet_b2_ap
In Collection: AdvProp
Metadata:
FLOPs: 1234321170
Parameters: 9110000
File Size: 36800745
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b2_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.89'
Momentum: 0.9
Batch Size: 2048
Image Size: '260'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1354
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.3%
Top 5 Accuracy: 95.03%
- Name: tf_efficientnet_b3_ap
In Collection: AdvProp
Metadata:
FLOPs: 2275247568
Parameters: 12230000
File Size: 49384538
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b3_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.904'
Momentum: 0.9
Batch Size: 2048
Image Size: '300'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1364
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.82%
Top 5 Accuracy: 95.62%
- Name: tf_efficientnet_b4_ap
In Collection: AdvProp
Metadata:
FLOPs: 5749638672
Parameters: 19340000
File Size: 77993585
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b4_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.922'
Momentum: 0.9
Batch Size: 2048
Image Size: '380'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1374
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.26%
Top 5 Accuracy: 96.39%
- Name: tf_efficientnet_b5_ap
In Collection: AdvProp
Metadata:
FLOPs: 13176501888
Parameters: 30390000
File Size: 122403150
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b5_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.934'
Momentum: 0.9
Batch Size: 2048
Image Size: '456'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1384
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.25%
Top 5 Accuracy: 96.97%
- Name: tf_efficientnet_b6_ap
In Collection: AdvProp
Metadata:
FLOPs: 24180518488
Parameters: 43040000
File Size: 173237466
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b6_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.942'
Momentum: 0.9
Batch Size: 2048
Image Size: '528'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1394
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.79%
Top 5 Accuracy: 97.14%
- Name: tf_efficientnet_b7_ap
In Collection: AdvProp
Metadata:
FLOPs: 48205304880
Parameters: 66349999
File Size: 266850607
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b7_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.949'
Momentum: 0.9
Batch Size: 2048
Image Size: '600'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1405
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.12%
Top 5 Accuracy: 97.25%
- Name: tf_efficientnet_b8_ap
In Collection: AdvProp
Metadata:
FLOPs: 80962956270
Parameters: 87410000
File Size: 351412563
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b8_ap
LR: 0.128
Epochs: 350
Crop Pct: '0.954'
Momentum: 0.9
Batch Size: 2048
Image Size: '672'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1416
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.37%
Top 5 Accuracy: 97.3%
--> | pytorch-image-models/hfdocs/source/models/advprop.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/advprop.mdx",
"repo_id": "pytorch-image-models",
"token_count": 6034
} |
# NASNet
**NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('nasnetalarge', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `nasnetalarge`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('nasnetalarge', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{zoph2018learning,
title={Learning Transferable Architectures for Scalable Image Recognition},
author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le},
year={2018},
eprint={1707.07012},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: NASNet
Paper:
Title: Learning Transferable Architectures for Scalable Image Recognition
URL: https://paperswithcode.com/paper/learning-transferable-architectures-for
Models:
- Name: nasnetalarge
In Collection: NASNet
Metadata:
FLOPs: 30242402862
Parameters: 88750000
File Size: 356056626
Architecture:
- Average Pooling
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- ReLU
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 50x Tesla K40 GPUs
ID: nasnetalarge
Dropout: 0.5
Crop Pct: '0.911'
Momentum: 0.9
Image Size: '331'
Interpolation: bicubic
Label Smoothing: 0.1
RMSProp \\( \epsilon \\): 1.0
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/nasnet.py#L562
Weights: http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.63%
Top 5 Accuracy: 96.05%
-->
| pytorch-image-models/hfdocs/source/models/nasnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/nasnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1538
} |
# SK-ResNeXt
**SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNeXt are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner.
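The selective kernel operation can be sketched in three steps: split the input into branches with different receptive fields, fuse them into a pooled channel descriptor, and select per-channel branch weights with a softmax. The snippet below is a simplified illustration with made-up names, not the actual `timm` `SelectiveKernel` module.
```py
>>> import torch
>>> import torch.nn as nn
>>> class SelectiveKernelSketch(nn.Module):
...     """Simplified two-branch SK unit (illustrative only)."""
...     def __init__(self, channels, reduction=16):
...         super().__init__()
...         # split: branches with 3x3 and (dilated) 5x5 receptive fields
...         self.branch3 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
...         self.branch5 = nn.Conv2d(channels, channels, 3, padding=2, dilation=2, bias=False)
...         # fuse: squeeze pooled statistics to a small descriptor
...         mid = max(channels // reduction, 32)
...         self.fc = nn.Conv2d(channels, mid, 1)
...         # select: per-branch, per-channel attention logits
...         self.attn = nn.Conv2d(mid, channels * 2, 1)
...     def forward(self, x):
...         b3, b5 = self.branch3(x), self.branch5(x)
...         s = (b3 + b5).mean((2, 3), keepdim=True)  # global average pool
...         a = self.attn(torch.relu(self.fc(s)))
...         a = a.reshape(x.shape[0], 2, x.shape[1], 1, 1).softmax(dim=1)
...         return a[:, 0] * b3 + a[:, 1] * b5
```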
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('skresnext50_32x4d', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `skresnext50_32x4d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{li2019selective,
title={Selective Kernel Networks},
author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang},
year={2019},
eprint={1903.06586},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: SKResNeXt
Paper:
Title: Selective Kernel Networks
URL: https://paperswithcode.com/paper/selective-kernel-networks
Models:
- Name: skresnext50_32x4d
In Collection: SKResNeXt
Metadata:
FLOPs: 5739845824
Parameters: 27480000
File Size: 110340975
Architecture:
- Convolution
- Dense Connections
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- Residual Connection
- Selective Kernel
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: skresnext50_32x4d
LR: 0.1
Epochs: 100
Layers: 50
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L210
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.15%
Top 5 Accuracy: 94.64%
--> | pytorch-image-models/hfdocs/source/models/skresnext.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/skresnext.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1645
} |
from abc import abstractmethod
class Reader:
def __init__(self):
pass
@abstractmethod
def _filename(self, index, basename=False, absolute=False):
pass
def filename(self, index, basename=False, absolute=False):
return self._filename(index, basename=basename, absolute=absolute)
def filenames(self, basename=False, absolute=False):
return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))]
| pytorch-image-models/timm/data/readers/reader.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/reader.py",
"repo_id": "pytorch-image-models",
"token_count": 171
} |
""" Activations (memory-efficient w/ custom autograd)
A collection of activation fns and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
These activations are not compatible with jit scripting or ONNX export of the model, please use
basic versions of the activations.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish_fwd(x):
return x.mul(torch.sigmoid(x))
def swish_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
class SwishAutoFn(torch.autograd.Function):
""" optimised Swish w/ memory-efficient checkpoint
    Inspired by conversation between Jeremy Howard & Adam Paszke
https://twitter.com/jeremyphoward/status/1188251041835315200
"""
@staticmethod
def symbolic(g, x):
return g.op("Mul", x, g.op("Sigmoid", x))
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return swish_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return swish_bwd(x, grad_output)
def swish_me(x, inplace=False):
return SwishAutoFn.apply(x)
class SwishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(SwishMe, self).__init__()
def forward(self, x):
return SwishAutoFn.apply(x)
def mish_fwd(x):
return x.mul(torch.tanh(F.softplus(x)))
def mish_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishAutoFn(torch.autograd.Function):
""" Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
A memory efficient variant of Mish
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return mish_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return mish_bwd(x, grad_output)
def mish_me(x, inplace=False):
return MishAutoFn.apply(x)
class MishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(MishMe, self).__init__()
def forward(self, x):
return MishAutoFn.apply(x)
def hard_sigmoid_fwd(x, inplace: bool = False):
return (x + 3).clamp(min=0, max=6).div(6.)
def hard_sigmoid_bwd(x, grad_output):
m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6.
return grad_output * m
class HardSigmoidAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_sigmoid_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_sigmoid_bwd(x, grad_output)
def hard_sigmoid_me(x, inplace: bool = False):
return HardSigmoidAutoFn.apply(x)
class HardSigmoidMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoidMe, self).__init__()
def forward(self, x):
return HardSigmoidAutoFn.apply(x)
def hard_swish_fwd(x):
return x * (x + 3).clamp(min=0, max=6).div(6.)
def hard_swish_bwd(x, grad_output):
m = torch.ones_like(x) * (x >= 3.)
m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m)
return grad_output * m
class HardSwishAutoFn(torch.autograd.Function):
"""A memory efficient HardSwish activation"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_swish_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_swish_bwd(x, grad_output)
@staticmethod
def symbolic(g, self):
input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float)))
hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
return g.op("Mul", self, hardtanh_)
def hard_swish_me(x, inplace=False):
return HardSwishAutoFn.apply(x)
class HardSwishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwishMe, self).__init__()
def forward(self, x):
return HardSwishAutoFn.apply(x)
def hard_mish_fwd(x):
return 0.5 * x * (x + 2).clamp(min=0, max=2)
def hard_mish_bwd(x, grad_output):
m = torch.ones_like(x) * (x >= -2.)
m = torch.where((x >= -2.) & (x <= 0.), x + 1., m)
return grad_output * m
class HardMishAutoFn(torch.autograd.Function):
""" A memory efficient variant of Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_mish_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_mish_bwd(x, grad_output)
def hard_mish_me(x, inplace: bool = False):
return HardMishAutoFn.apply(x)
class HardMishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMishMe, self).__init__()
def forward(self, x):
return HardMishAutoFn.apply(x)
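# Example usage (illustrative): the modules above are drop-in replacements for the
# basic activation modules elsewhere in timm.layers, e.g.
#
#   act = SwishMe(inplace=True)        # `inplace` is accepted for interface parity but ignored
#   y = act(torch.randn(2, 8, requires_grad=True))
#   y.sum().backward()                 # gradient flows through the custom autograd Function
#
# or the functional forms, e.g. y = hard_swish_me(x).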
| pytorch-image-models/timm/layers/activations_me.py/0 | {
"file_path": "pytorch-image-models/timm/layers/activations_me.py",
"repo_id": "pytorch-image-models",
"token_count": 2424
} |
""" Norm Layer Factory
Create norm modules by string (to mirror create_act and create_norm_act fns)
Copyright 2022 Ross Wightman
"""
import functools
import types
from typing import Type
import torch.nn as nn
from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d, SimpleNorm, SimpleNorm2d
from torchvision.ops.misc import FrozenBatchNorm2d
_NORM_MAP = dict(
batchnorm=nn.BatchNorm2d,
batchnorm2d=nn.BatchNorm2d,
batchnorm1d=nn.BatchNorm1d,
groupnorm=GroupNorm,
groupnorm1=GroupNorm1,
layernorm=LayerNorm,
layernorm2d=LayerNorm2d,
rmsnorm=RmsNorm,
rmsnorm2d=RmsNorm2d,
simplenorm=SimpleNorm,
simplenorm2d=SimpleNorm2d,
frozenbatchnorm2d=FrozenBatchNorm2d,
)
_NORM_TYPES = {m for n, m in _NORM_MAP.items()}
def create_norm_layer(layer_name, num_features, **kwargs):
layer = get_norm_layer(layer_name)
layer_instance = layer(num_features, **kwargs)
return layer_instance
def get_norm_layer(norm_layer):
if norm_layer is None:
return None
assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
norm_kwargs = {}
# unbind partial fn, so args can be rebound later
if isinstance(norm_layer, functools.partial):
norm_kwargs.update(norm_layer.keywords)
norm_layer = norm_layer.func
if isinstance(norm_layer, str):
if not norm_layer:
return None
layer_name = norm_layer.replace('_', '').lower()
norm_layer = _NORM_MAP[layer_name]
else:
norm_layer = norm_layer
if norm_kwargs:
norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args
return norm_layer
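# Example usage (illustrative):
#
#   norm = create_norm_layer('layernorm2d', 64)   # -> LayerNorm2d(64)
#   norm_cls = get_norm_layer('groupnorm')        # -> GroupNorm class, args bound at call time
#   norm_cls = get_norm_layer(functools.partial(nn.GroupNorm, eps=1e-5))  # partial kwargs re-bound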
| pytorch-image-models/timm/layers/create_norm.py/0 | {
"file_path": "pytorch-image-models/timm/layers/create_norm.py",
"repo_id": "pytorch-image-models",
"token_count": 688
} |
""" Interpolation helpers for timm layers
RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations
Copyright Shane Barratt, Apache 2.0 license
"""
import torch
from itertools import product
class RegularGridInterpolator:
""" Interpolate data defined on a rectilinear grid with even or uneven spacing.
Produces similar results to scipy RegularGridInterpolator or interp2d
in 'linear' mode.
Taken from https://github.com/sbarratt/torch_interpolations
"""
def __init__(self, points, values):
self.points = points
self.values = values
assert isinstance(self.points, tuple) or isinstance(self.points, list)
assert isinstance(self.values, torch.Tensor)
self.ms = list(self.values.shape)
self.n = len(self.points)
assert len(self.ms) == self.n
for i, p in enumerate(self.points):
assert isinstance(p, torch.Tensor)
assert p.shape[0] == self.values.shape[i]
def __call__(self, points_to_interp):
assert self.points is not None
assert self.values is not None
assert len(points_to_interp) == len(self.points)
K = points_to_interp[0].shape[0]
for x in points_to_interp:
assert x.shape[0] == K
idxs = []
dists = []
overalls = []
for p, x in zip(self.points, points_to_interp):
idx_right = torch.bucketize(x, p)
idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1
idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1)
dist_left = x - p[idx_left]
dist_right = p[idx_right] - x
dist_left[dist_left < 0] = 0.
dist_right[dist_right < 0] = 0.
both_zero = (dist_left == 0) & (dist_right == 0)
dist_left[both_zero] = dist_right[both_zero] = 1.
idxs.append((idx_left, idx_right))
dists.append((dist_left, dist_right))
overalls.append(dist_left + dist_right)
numerator = 0.
for indexer in product([0, 1], repeat=self.n):
as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)]
bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)]
numerator += self.values[as_s] * \
torch.prod(torch.stack(bs_s), dim=0)
denominator = torch.prod(torch.stack(overalls), dim=0)
return numerator / denominator
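# Example usage (illustrative): linear interpolation of values on an unevenly spaced 2D grid.
#
#   xs = torch.tensor([0.0, 0.5, 2.0])
#   ys = torch.tensor([0.0, 1.0, 3.0, 4.0])
#   values = torch.rand(3, 4)
#   interp = RegularGridInterpolator((xs, ys), values)
#   out = interp((torch.tensor([0.25, 1.0]), torch.tensor([0.5, 3.5])))  # shape (2,)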
| pytorch-image-models/timm/layers/interpolate.py/0 | {
"file_path": "pytorch-image-models/timm/layers/interpolate.py",
"repo_id": "pytorch-image-models",
"token_count": 1121
} |
""" Relative position embedding modules and functions
Hacked together by / Copyright 2022 Ross Wightman
"""
import math
import os
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .grid import ndgrid
from .interpolate import RegularGridInterpolator
from .mlp import Mlp
from .weight_init import trunc_normal_
_USE_SCIPY = int(os.environ.get('TIMM_USE_SCIPY_INTERP', 0)) > 0
def gen_relative_position_index(
q_size: Tuple[int, int],
k_size: Optional[Tuple[int, int]] = None,
class_token: bool = False,
) -> torch.Tensor:
# Adapted with significant modifications from Swin / BeiT codebases
# get pair-wise relative position index for each token inside the window
assert k_size is None, 'Different q & k sizes not currently supported' # FIXME
coords = torch.stack(ndgrid(torch.arange(q_size[0]), torch.arange(q_size[1]))).flatten(1) # 2, Wh, Ww
relative_coords = coords[:, :, None] - coords[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2
relative_coords[:, :, 0] += q_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += q_size[1] - 1
relative_coords[:, :, 0] *= 2 * q_size[1] - 1
num_relative_distance = (2 * q_size[0] - 1) * (2 * q_size[1] - 1)
# else:
# # FIXME different q vs k sizes is a WIP, need to better offset the two grids?
# q_coords = torch.stack(
# ndgrid(
# torch.arange(q_size[0]),
# torch.arange(q_size[1])
# )
# ).flatten(1) # 2, Wh, Ww
# k_coords = torch.stack(
# ndgrid(
# torch.arange(k_size[0]),
# torch.arange(k_size[1])
# )
# ).flatten(1)
# relative_coords = q_coords[:, :, None] - k_coords[:, None, :] # 2, Wh*Ww, Wh*Ww
# relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2
# relative_coords[:, :, 0] += max(q_size[0], k_size[0]) - 1 # shift to start from 0
# relative_coords[:, :, 1] += max(q_size[1], k_size[1]) - 1
# relative_coords[:, :, 0] *= k_size[1] + q_size[1] - 1
# relative_position_index = relative_coords.sum(-1) # Qh*Qw, Kh*Kw
# num_relative_distance = (q_size[0] + k_size[0] - 1) * (q_size[1] + k_size[1] - 1) + 3
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
if class_token:
        # handle cls to token & token to cls & cls to cls as per beit for rel pos bias
# NOTE not intended or tested with MLP log-coords
relative_position_index = F.pad(relative_position_index, [1, 0, 1, 0])
relative_position_index[0, 0:] = num_relative_distance
relative_position_index[0:, 0] = num_relative_distance + 1
relative_position_index[0, 0] = num_relative_distance + 2
return relative_position_index.contiguous()
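# Illustrative note (added): for a 2x2 window without a class token the returned index is a
# (4, 4) long tensor with values in [0, (2*2-1)*(2*2-1) - 1] = [0, 8], so it can gather rows
# from a 9-entry relative position bias table, e.g. (table values hypothetical):
#   idx = gen_relative_position_index((2, 2))   # (Wh*Ww, Wh*Ww) = (4, 4)
#   table = torch.zeros(9, 4)                   # (num_relative_distance, num_heads)
#   bias = table[idx.view(-1)].view(4, 4, 4)    # (Wh*Ww, Wh*Ww, num_heads)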
def resize_rel_pos_bias_table_simple(
rel_pos_bias,
new_window_size: Tuple[int, int],
new_bias_shape: Tuple[int, ...],
):
dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1)
if rel_pos_bias.ndim == 3:
# TF maxvit style (num_heads, H, W) bias shape, no extra tokens currently supported
_, dst_h, dst_w = new_bias_shape
num_attn_heads, src_h, src_w = rel_pos_bias.shape
assert dst_h == dst_size[0] and dst_w == dst_size[1]
if src_h != dst_h or src_w != dst_w:
rel_pos_bias = torch.nn.functional.interpolate(
rel_pos_bias.unsqueeze(0),
size=dst_size,
mode="bicubic",
align_corners=False,
).squeeze(0)
else:
assert rel_pos_bias.ndim == 2
# (num_pos, num_heads) (aka flat) bias shape
dst_num_pos, _ = new_bias_shape
src_num_pos, num_attn_heads = rel_pos_bias.shape
num_extra_tokens = dst_num_pos - (dst_size[0] * dst_size[1])
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
src_size = (src_size, src_size) # FIXME could support non-equal src if argument passed
if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]:
if num_extra_tokens:
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
else:
extra_tokens = None
rel_pos_bias = torch.nn.functional.interpolate(
rel_pos_bias.transpose(1, 0).reshape((1, -1, src_size[0], src_size[1])),
size=dst_size,
mode="bicubic",
align_corners=False,
).view(-1, dst_num_pos - num_extra_tokens).transpose(0, 1)
if extra_tokens is not None:
rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
return rel_pos_bias
def resize_rel_pos_bias_table_levit(
position_bias_table,
new_size,
interpolation: str = 'bicubic',
antialias: bool = True,
):
"""
    Resample relative position bias table, as suggested in LeViT.
Adapted from: https://github.com/microsoft/Cream/blob/main/TinyViT/utils.py
"""
L1, nH1 = position_bias_table.size()
L2, nH2 = new_size
assert nH1 == nH2
if L1 != L2:
orig_dtype = position_bias_table.dtype
position_bias_table = position_bias_table.float()
# bicubic interpolate relative_position_bias_table if not match
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
relative_position_bias_table_resized = F.interpolate(
position_bias_table.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2),
mode=interpolation,
antialias=antialias)
relative_position_bias_table_resized = \
relative_position_bias_table_resized.view(nH2, L2).permute(1, 0)
        relative_position_bias_table_resized = relative_position_bias_table_resized.to(orig_dtype)
return relative_position_bias_table_resized
else:
return position_bias_table
def resize_rel_pos_bias_table(
rel_pos_bias,
new_window_size: Tuple[int, int],
new_bias_shape: Tuple[int, ...],
):
""" Resize relative position bias table using more advanced interpolation.
Modified from code in Microsoft Unilm (https://github.com/microsoft/unilm) repo (BeiT, BeiT-v2, etc).
https://github.com/microsoft/unilm/blob/5255d52de86dad642810f5849dd357769346c1d7/beit/run_class_finetuning.py#L351
    Args:
        rel_pos_bias: Relative position bias table to resize, either flat (num_pos, num_heads)
            or (num_heads, H, W) shaped.
        new_window_size: Target window size (height, width).
        new_bias_shape: Target shape of the bias table, used to infer layout and extra tokens.
    Returns:
        Resized relative position bias table.
    """
if _USE_SCIPY:
from scipy import interpolate
dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1)
if rel_pos_bias.ndim == 3:
# TF maxvit style (num_heads, H, W) bias shape, no extra tokens currently supported
num_extra_tokens = 0
_, dst_h, dst_w = new_bias_shape
assert dst_h == dst_size[0] and dst_w == dst_size[1]
num_attn_heads, src_h, src_w = rel_pos_bias.shape
src_size = (src_h, src_w)
has_flat_shape = False
else:
assert rel_pos_bias.ndim == 2
# (num_pos, num_heads) (aka flat) bias shape
dst_num_pos, _ = new_bias_shape
src_num_pos, num_attn_heads = rel_pos_bias.shape
num_extra_tokens = dst_num_pos - (dst_size[0] * dst_size[1])
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
src_size = (src_size, src_size)
has_flat_shape = True
if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]:
# print("Interpolating position from %dx%d to %dx%d" % (src_size[0], src_size[1], dst_size[0], dst_size[1]))
if num_extra_tokens:
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
else:
extra_tokens = None
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
def _calc(src, dst):
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src // 2)
if gp > dst // 2:
right = q
else:
left = q
dis = []
cur = 1
for i in range(src // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
return r_ids + [0] + dis
y = _calc(src_size[0], dst_size[0])
x = _calc(src_size[1], dst_size[1])
yx = [torch.tensor(y), torch.tensor(x)]
# print("Original positions = %s" % str(x))
ty = dst_size[0] // 2.0
tx = dst_size[1] // 2.0
dy = torch.arange(-ty, ty + 0.1, 1.0)
dx = torch.arange(-tx, tx + 0.1, 1.0)
dyx = ndgrid(dy, dx)
# print("Target positions = %s" % str(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
if has_flat_shape:
z = rel_pos_bias[:, i].view(src_size[0], src_size[1]).float()
else:
z = rel_pos_bias[i, :, :].float()
if _USE_SCIPY:
# Original beit code uses scipy w/ cubic interpolation
f = interpolate.interp2d(x, y, z.numpy(), kind='cubic')
r = torch.Tensor(f(dx, dy)).contiguous().to(rel_pos_bias.device)
else:
# Without scipy dependency, I've found a reasonably simple impl
                # that supports unevenly spaced interpolation pts with 'linear' interp.
# Results are comparable to scipy for model accuracy in most cases.
f = RegularGridInterpolator(yx, z)
r = f(dyx).contiguous().to(rel_pos_bias.device)
if has_flat_shape:
r = r.view(-1, 1)
all_rel_pos_bias.append(r)
if has_flat_shape:
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
else:
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=0)
if extra_tokens is not None:
assert has_flat_shape
rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
return rel_pos_bias
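# Usage sketch (added for illustration): a typical call when adapting a checkpoint from an
# 8x8 to a 12x12 window with 3 extra (cls-related) positions, as in BeiT-style tables:
#   old_table = torch.randn(15 * 15 + 3, 12)    # (num_pos, num_heads) for an 8x8 window
#   new_table = resize_rel_pos_bias_table(old_table, (12, 12), (23 * 23 + 3, 12))
#   # new_table.shape -> torch.Size([532, 12])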
class RelPosBias(nn.Module):
""" Relative Position Bias
Adapted from Swin-V1 relative position bias impl, modularized.
"""
def __init__(self, window_size, num_heads, prefix_tokens=0):
super().__init__()
assert prefix_tokens <= 1
self.window_size = window_size
self.window_area = window_size[0] * window_size[1]
self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,)
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens
self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))
self.register_buffer(
"relative_position_index",
gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0).view(-1),
persistent=False,
)
self.init_weights()
def init_weights(self):
trunc_normal_(self.relative_position_bias_table, std=.02)
def get_bias(self) -> torch.Tensor:
relative_position_bias = self.relative_position_bias_table[self.relative_position_index]
# win_h * win_w, win_h * win_w, num_heads
relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1)
return relative_position_bias.unsqueeze(0).contiguous()
def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):
return attn + self.get_bias()
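# Usage sketch (added for illustration; shapes follow from the code above): a window
# attention module would create RelPosBias once and add its bias to the attention logits
# before softmax, e.g. for an 8x8 window and 4 heads:
#   rel_pos = RelPosBias(window_size=(8, 8), num_heads=4)
#   attn = torch.randn(2, 4, 64, 64)   # (B, num_heads, Wh*Ww, Wh*Ww)
#   attn = rel_pos(attn)               # adds a (1, 4, 64, 64) bias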
def gen_relative_log_coords(
win_size: Tuple[int, int],
pretrained_win_size: Tuple[int, int] = (0, 0),
mode='swin',
):
assert mode in ('swin', 'cr')
# as per official swin-v2 impl, supporting timm specific 'cr' log coords as well
relative_coords_h = torch.arange(-(win_size[0] - 1), win_size[0]).to(torch.float32)
relative_coords_w = torch.arange(-(win_size[1] - 1), win_size[1]).to(torch.float32)
relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w))
relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous() # 2*Wh-1, 2*Ww-1, 2
if mode == 'swin':
if pretrained_win_size[0] > 0:
relative_coords_table[:, :, 0] /= (pretrained_win_size[0] - 1)
relative_coords_table[:, :, 1] /= (pretrained_win_size[1] - 1)
else:
relative_coords_table[:, :, 0] /= (win_size[0] - 1)
relative_coords_table[:, :, 1] /= (win_size[1] - 1)
relative_coords_table *= 8 # normalize to -8, 8
relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
1.0 + relative_coords_table.abs()) / math.log2(8)
else:
# mode == 'cr'
relative_coords_table = torch.sign(relative_coords_table) * torch.log(
1.0 + relative_coords_table.abs())
return relative_coords_table
class RelPosMlp(nn.Module):
""" Log-Coordinate Relative Position MLP
Based on ideas presented in Swin-V2 paper (https://arxiv.org/abs/2111.09883)
    This impl covers the 'swin' implementation as well as a timm specific 'cr' mode.
"""
def __init__(
self,
window_size,
num_heads=8,
hidden_dim=128,
prefix_tokens=0,
mode='cr',
pretrained_window_size=(0, 0)
):
super().__init__()
self.window_size = window_size
self.window_area = self.window_size[0] * self.window_size[1]
self.prefix_tokens = prefix_tokens
self.num_heads = num_heads
self.bias_shape = (self.window_area,) * 2 + (num_heads,)
if mode == 'swin':
self.bias_act = nn.Sigmoid()
self.bias_gain = 16
mlp_bias = (True, False)
else:
self.bias_act = nn.Identity()
self.bias_gain = None
mlp_bias = True
self.mlp = Mlp(
2, # x, y
hidden_features=hidden_dim,
out_features=num_heads,
act_layer=nn.ReLU,
bias=mlp_bias,
drop=(0.125, 0.)
)
self.register_buffer(
"relative_position_index",
gen_relative_position_index(window_size).view(-1),
persistent=False)
# get relative_coords_table
self.register_buffer(
"rel_coords_log",
gen_relative_log_coords(window_size, pretrained_window_size, mode=mode),
persistent=False)
def get_bias(self) -> torch.Tensor:
relative_position_bias = self.mlp(self.rel_coords_log)
if self.relative_position_index is not None:
relative_position_bias = relative_position_bias.view(-1, self.num_heads)[self.relative_position_index]
relative_position_bias = relative_position_bias.view(self.bias_shape)
relative_position_bias = relative_position_bias.permute(2, 0, 1)
relative_position_bias = self.bias_act(relative_position_bias)
if self.bias_gain is not None:
relative_position_bias = self.bias_gain * relative_position_bias
if self.prefix_tokens:
relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0])
return relative_position_bias.unsqueeze(0).contiguous()
def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):
return attn + self.get_bias()
def generate_lookup_tensor(
length: int,
max_relative_position: Optional[int] = None,
):
"""Generate a one_hot lookup tensor to reindex embeddings along one dimension.
Args:
length: the length to reindex to.
max_relative_position: the maximum relative position to consider.
Relative position embeddings for distances above this threshold
are zeroed out.
Returns:
a lookup Tensor of size [length, length, vocab_size] that satisfies
ret[n,m,v] = 1{m - n + max_relative_position = v}.
"""
if max_relative_position is None:
max_relative_position = length - 1
    # Build a one-hot lookup; offsets beyond max_relative_position map to all-zero rows.
vocab_size = 2 * max_relative_position + 1
ret = torch.zeros(length, length, vocab_size)
for i in range(length):
for x in range(length):
v = x - i + max_relative_position
if abs(x - i) > max_relative_position:
continue
ret[i, x, v] = 1
return ret
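# Illustrative note (added): generate_lookup_tensor(3) returns a (3, 3, 5) one-hot tensor
# where entry [i, x, v] is 1 exactly when v == x - i + 2 and |x - i| <= 2; these one-hot
# lookups drive the einsum reindexing in reindex_2d_einsum_lookup below.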
def reindex_2d_einsum_lookup(
relative_position_tensor,
height: int,
width: int,
height_lookup: torch.Tensor,
width_lookup: torch.Tensor,
) -> torch.Tensor:
"""Reindex 2d relative position bias with 2 independent einsum lookups.
Adapted from:
https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py
Args:
relative_position_tensor: tensor of shape
[..., vocab_height, vocab_width, ...].
height: height to reindex to.
width: width to reindex to.
height_lookup: one-hot height lookup
width_lookup: one-hot width lookup
Returns:
reindexed_tensor: a Tensor of shape
[..., height * width, height * width, ...]
"""
reindexed_tensor = torch.einsum('nhw,ixh->nixw', relative_position_tensor, height_lookup)
reindexed_tensor = torch.einsum('nixw,jyw->nijxy', reindexed_tensor, width_lookup)
area = height * width
return reindexed_tensor.reshape(relative_position_tensor.shape[0], area, area)
class RelPosBiasTf(nn.Module):
""" Relative Position Bias Impl (Compatible with Tensorflow MaxViT models)
Adapted from:
https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py
"""
def __init__(self, window_size, num_heads, prefix_tokens=0):
super().__init__()
assert prefix_tokens <= 1
self.window_size = window_size
self.window_area = window_size[0] * window_size[1]
self.num_heads = num_heads
vocab_height = 2 * window_size[0] - 1
vocab_width = 2 * window_size[1] - 1
self.bias_shape = (self.num_heads, vocab_height, vocab_width)
self.relative_position_bias_table = nn.Parameter(torch.zeros(self.bias_shape))
self.register_buffer('height_lookup', generate_lookup_tensor(window_size[0]), persistent=False)
self.register_buffer('width_lookup', generate_lookup_tensor(window_size[1]), persistent=False)
self.init_weights()
def init_weights(self):
nn.init.normal_(self.relative_position_bias_table, std=.02)
def get_bias(self) -> torch.Tensor:
# FIXME change to not use one-hot/einsum?
return reindex_2d_einsum_lookup(
self.relative_position_bias_table,
self.window_size[0],
self.window_size[1],
self.height_lookup,
self.width_lookup
)
def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):
return attn + self.get_bias()
| pytorch-image-models/timm/layers/pos_embed_rel.py/0 | {
"file_path": "pytorch-image-models/timm/layers/pos_embed_rel.py",
"repo_id": "pytorch-image-models",
"token_count": 9303
} |
""" Cross Entropy w/ smoothing or soft targets
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class LabelSmoothingCrossEntropy(nn.Module):
""" NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.1):
super(LabelSmoothingCrossEntropy, self).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1. - smoothing
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
class SoftTargetCrossEntropy(nn.Module):
def __init__(self):
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
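# --- Usage sketch (editor's illustration, not part of the original timm module) ---
# LabelSmoothingCrossEntropy expects integer class targets; SoftTargetCrossEntropy expects a
# per-sample probability distribution, e.g. produced by mixup/cutmix label mixing.
if __name__ == '__main__':
    _logits = torch.randn(4, 10)
    _hard_targets = torch.randint(0, 10, (4,))
    _soft_targets = torch.softmax(torch.randn(4, 10), dim=-1)
    print(LabelSmoothingCrossEntropy(smoothing=0.1)(_logits, _hard_targets))  # scalar loss
    print(SoftTargetCrossEntropy()(_logits, _soft_targets))  # scalar loss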
| pytorch-image-models/timm/loss/cross_entropy.py/0 | {
"file_path": "pytorch-image-models/timm/loss/cross_entropy.py",
"repo_id": "pytorch-image-models",
"token_count": 470
} |
"""Pytorch Densenet implementation w/ tweaks
This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with
fixed kwargs passthrough and addition of dynamic global avg/max pool.
"""
import re
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit.annotations import List
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import BatchNormAct2d, get_norm_act_layer, BlurPool2d, create_classifier
from ._builder import build_model_with_cfg
from ._manipulate import MATCH_PREV_GROUP, checkpoint
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['DenseNet']
class DenseLayer(nn.Module):
def __init__(
self,
num_input_features,
growth_rate,
bn_size,
norm_layer=BatchNormAct2d,
drop_rate=0.,
grad_checkpointing=False,
):
super(DenseLayer, self).__init__()
self.add_module('norm1', norm_layer(num_input_features)),
self.add_module('conv1', nn.Conv2d(
num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', norm_layer(bn_size * growth_rate)),
self.add_module('conv2', nn.Conv2d(
bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = float(drop_rate)
self.grad_checkpointing = grad_checkpointing
def bottleneck_fn(self, xs):
# type: (List[torch.Tensor]) -> torch.Tensor
concated_features = torch.cat(xs, 1)
bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, x):
# type: (List[torch.Tensor]) -> bool
for tensor in x:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, x):
# type: (List[torch.Tensor]) -> torch.Tensor
def closure(*xs):
return self.bottleneck_fn(xs)
return checkpoint(closure, *x)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (torch.Tensor)
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, x): # noqa: F811
if isinstance(x, torch.Tensor):
prev_features = [x]
else:
prev_features = x
if self.grad_checkpointing and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bottleneck_fn(prev_features)
new_features = self.conv2(self.norm2(bottleneck_output))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class DenseBlock(nn.ModuleDict):
_version = 2
def __init__(
self,
num_layers,
num_input_features,
bn_size,
growth_rate,
norm_layer=BatchNormAct2d,
drop_rate=0.,
grad_checkpointing=False,
):
super(DenseBlock, self).__init__()
for i in range(num_layers):
layer = DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
norm_layer=norm_layer,
drop_rate=drop_rate,
grad_checkpointing=grad_checkpointing,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class DenseTransition(nn.Sequential):
def __init__(
self,
num_input_features,
num_output_features,
norm_layer=BatchNormAct2d,
aa_layer=None,
):
super(DenseTransition, self).__init__()
self.add_module('norm', norm_layer(num_input_features))
self.add_module('conv', nn.Conv2d(
num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
if aa_layer is not None:
self.add_module('pool', aa_layer(num_output_features, stride=2))
else:
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate before classifier layer
proj_drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
def __init__(
self,
growth_rate=32,
block_config=(6, 12, 24, 16),
num_classes=1000,
in_chans=3,
global_pool='avg',
bn_size=4,
stem_type='',
act_layer='relu',
norm_layer='batchnorm2d',
aa_layer=None,
drop_rate=0.,
proj_drop_rate=0.,
memory_efficient=False,
aa_stem_only=True,
):
self.num_classes = num_classes
super(DenseNet, self).__init__()
norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
# Stem
deep_stem = 'deep' in stem_type # 3x3 deep stem
num_init_features = growth_rate * 2
if aa_layer is None:
stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
stem_pool = nn.Sequential(*[
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
aa_layer(channels=num_init_features, stride=2)])
if deep_stem:
stem_chs_1 = stem_chs_2 = growth_rate
if 'tiered' in stem_type:
stem_chs_1 = 3 * (growth_rate // 4)
stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4)
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)),
('norm0', norm_layer(stem_chs_1)),
('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)),
('norm1', norm_layer(stem_chs_2)),
('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)),
('norm2', norm_layer(num_init_features)),
('pool0', stem_pool),
]))
else:
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', norm_layer(num_init_features)),
('pool0', stem_pool),
]))
self.feature_info = [
dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')]
current_stride = 4
# DenseBlocks
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
norm_layer=norm_layer,
drop_rate=proj_drop_rate,
grad_checkpointing=memory_efficient,
)
module_name = f'denseblock{(i + 1)}'
self.features.add_module(module_name, block)
num_features = num_features + num_layers * growth_rate
transition_aa_layer = None if aa_stem_only else aa_layer
if i != len(block_config) - 1:
self.feature_info += [
dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)]
current_stride *= 2
trans = DenseTransition(
num_input_features=num_features,
num_output_features=num_features // 2,
norm_layer=norm_layer,
aa_layer=transition_aa_layer,
)
self.features.add_module(f'transition{i + 1}', trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', norm_layer(num_features))
self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')]
self.num_features = self.head_hidden_size = num_features
# Linear layer
global_pool, classifier = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.classifier = classifier
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^features\.conv[012]|features\.norm[012]|features\.pool[012]',
blocks=r'^features\.(?:denseblock|transition)(\d+)' if coarse else [
(r'^features\.denseblock(\d+)\.denselayer(\d+)', None),
(r'^features\.transition(\d+)', MATCH_PREV_GROUP) # FIXME combine with previous denselayer
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for b in self.features.modules():
if isinstance(b, DenseLayer):
b.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.classifier(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _filter_torchvision_pretrained(state_dict):
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
return state_dict
def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs):
kwargs['growth_rate'] = growth_rate
kwargs['block_config'] = block_config
return build_model_with_cfg(
DenseNet,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
pretrained_filter_fn=_filter_torchvision_pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'features.conv0', 'classifier': 'classifier', **kwargs,
}
default_cfgs = generate_default_cfgs({
'densenet121.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'densenetblur121d.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'densenet264d.untrained': _cfg(),
'densenet121.tv_in1k': _cfg(hf_hub_id='timm/'),
'densenet169.tv_in1k': _cfg(hf_hub_id='timm/'),
'densenet201.tv_in1k': _cfg(hf_hub_id='timm/'),
'densenet161.tv_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def densenet121(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16))
model = _create_densenet('densenet121', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenetblur121d(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-121 w/ blur-pooling & 3-layer 3x3 stem
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', aa_layer=BlurPool2d)
model = _create_densenet('densenetblur121d', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet169(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 32, 32))
model = _create_densenet('densenet169', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet201(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=32, block_config=(6, 12, 48, 32))
model = _create_densenet('densenet201', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet161(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=48, block_config=(6, 12, 36, 24))
model = _create_densenet('densenet161', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def densenet264d(pretrained=False, **kwargs) -> DenseNet:
r"""Densenet-264 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model_args = dict(growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep')
model = _create_densenet('densenet264d', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'tv_densenet121': 'densenet121.tv_in1k',
})
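# --- Usage sketch (editor's illustration, not part of the original timm module) ---
# The registered constructors above can be called directly (or through timm.create_model).
# pretrained=False keeps weights random so no download is required for a quick shape check.
if __name__ == '__main__':
    _model = densenet121(pretrained=False, num_classes=10)
    _out = _model(torch.randn(1, 3, 224, 224))
    print(_out.shape)  # torch.Size([1, 10])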
| pytorch-image-models/timm/models/densenet.py/0 | {
"file_path": "pytorch-image-models/timm/models/densenet.py",
"repo_id": "pytorch-image-models",
"token_count": 7591
} |
"""
An implementation of GhostNet & GhostNetV2 Models as defined in:
GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907
GhostNetV2: Enhance Cheap Operation with Long-Range Attention. https://proceedings.neurips.cc/paper_files/paper/2022/file/40b60852a4abdaa696b5a1a78da34635-Paper-Conference.pdf
The train script & code of models at:
Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch
Original model: https://github.com/huawei-noah/Efficient-AI-Backbones/blob/master/ghostnetv2_pytorch/model/ghostnetv2_torch.py
"""
import math
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, Linear, make_divisible
from ._builder import build_model_with_cfg
from ._efficientnet_blocks import SqueezeExcite, ConvBnAct
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['GhostNet']
_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4))
class GhostModule(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
use_act=True,
act_layer=nn.ReLU,
):
super(GhostModule, self).__init__()
self.out_chs = out_chs
init_chs = math.ceil(out_chs / ratio)
new_chs = init_chs * (ratio - 1)
self.primary_conv = nn.Sequential(
nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(init_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False),
nn.BatchNorm2d(new_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out[:, :self.out_chs, :, :]
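# Illustrative note (added): with ratio=2 and out_chs=64, primary_conv produces 32 "intrinsic"
# channels and cheap_operation derives another 32 "ghost" channels from them with a cheap
# depthwise conv; the concatenation is then trimmed to exactly out_chs channels.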
class GhostModuleV2(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
use_act=True,
act_layer=nn.ReLU,
):
super().__init__()
self.gate_fn = nn.Sigmoid()
self.out_chs = out_chs
init_chs = math.ceil(out_chs / ratio)
new_chs = init_chs * (ratio - 1)
self.primary_conv = nn.Sequential(
nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(init_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False),
nn.BatchNorm2d(new_chs),
act_layer(inplace=True) if use_act else nn.Identity(),
)
self.short_conv = nn.Sequential(
nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(out_chs),
nn.Conv2d(out_chs, out_chs, kernel_size=(1, 5), stride=1, padding=(0, 2), groups=out_chs, bias=False),
nn.BatchNorm2d(out_chs),
nn.Conv2d(out_chs, out_chs, kernel_size=(5, 1), stride=1, padding=(2, 0), groups=out_chs, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
res = self.short_conv(F.avg_pool2d(x, kernel_size=2, stride=2))
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out[:, :self.out_chs, :, :] * F.interpolate(
self.gate_fn(res), size=(out.shape[-2], out.shape[-1]), mode='nearest')
class GhostBottleneck(nn.Module):
""" Ghost bottleneck w/ optional SE"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
act_layer=nn.ReLU,
se_ratio=0.,
mode='original',
):
super(GhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.
self.stride = stride
# Point-wise expansion
if mode == 'original':
self.ghost1 = GhostModule(in_chs, mid_chs, use_act=True, act_layer=act_layer)
else:
self.ghost1 = GhostModuleV2(in_chs, mid_chs, use_act=True, act_layer=act_layer)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False)
self.bn_dw = nn.BatchNorm2d(mid_chs)
else:
self.conv_dw = None
self.bn_dw = None
# Squeeze-and-excitation
self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None
# Point-wise linear projection
self.ghost2 = GhostModule(mid_chs, out_chs, use_act=False)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
shortcut = x
# 1st ghost bottleneck
x = self.ghost1(x)
# Depth-wise convolution
if self.conv_dw is not None:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x += self.shortcut(shortcut)
return x
class GhostNet(nn.Module):
def __init__(
self,
cfgs,
num_classes=1000,
width=1.0,
in_chans=3,
output_stride=32,
global_pool='avg',
drop_rate=0.2,
version='v1',
):
super(GhostNet, self).__init__()
# setting of inverted residual blocks
assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported'
self.cfgs = cfgs
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
# building first layer
stem_chs = make_divisible(16 * width, 4)
self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False)
        self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module='conv_stem'))
self.bn1 = nn.BatchNorm2d(stem_chs)
self.act1 = nn.ReLU(inplace=True)
prev_chs = stem_chs
# building inverted residual blocks
stages = nn.ModuleList([])
stage_idx = 0
layer_idx = 0
net_stride = 2
for cfg in self.cfgs:
layers = []
s = 1
for k, exp_size, c, se_ratio, s in cfg:
out_chs = make_divisible(c * width, 4)
mid_chs = make_divisible(exp_size * width, 4)
layer_kwargs = {}
if version == 'v2' and layer_idx > 1:
layer_kwargs['mode'] = 'attn'
layers.append(GhostBottleneck(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, **layer_kwargs))
prev_chs = out_chs
layer_idx += 1
if s > 1:
net_stride *= 2
self.feature_info.append(dict(
num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}'))
stages.append(nn.Sequential(*layers))
stage_idx += 1
out_chs = make_divisible(exp_size * width, 4)
stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1)))
self.pool_dim = prev_chs = out_chs
self.blocks = nn.Sequential(*stages)
# building last several layers
self.num_features = prev_chs
self.head_hidden_size = out_chs = 1280
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True)
self.act2 = nn.ReLU(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity()
# FIXME init
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv_stem|bn1',
blocks=[
(r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None),
(r'conv_head', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
# cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.classifier(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model: nn.Module):
out_dict = {}
for k, v in state_dict.items():
if 'total' in k:
continue
out_dict[k] = v
return out_dict
def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs):
"""
Constructs a GhostNet model
"""
cfgs = [
# k, t, c, SE, s
# stage1
[[3, 16, 16, 0, 1]],
# stage2
[[3, 48, 24, 0, 2]],
[[3, 72, 24, 0, 1]],
# stage3
[[5, 72, 40, 0.25, 2]],
[[5, 120, 40, 0.25, 1]],
# stage4
[[3, 240, 80, 0, 2]],
[[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 0.25, 1],
[3, 672, 112, 0.25, 1]
],
# stage5
[[5, 672, 160, 0.25, 2]],
[[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1]
]
]
model_kwargs = dict(
cfgs=cfgs,
width=width,
**kwargs,
)
return build_model_with_cfg(
GhostNet,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True),
**model_kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'ghostnet_050.untrained': _cfg(),
'ghostnet_100.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'
),
'ghostnet_130.untrained': _cfg(),
'ghostnetv2_100.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_10.pth.tar'
),
'ghostnetv2_130.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_13.pth.tar'
),
'ghostnetv2_160.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_16.pth.tar'
),
})
@register_model
def ghostnet_050(pretrained=False, **kwargs) -> GhostNet:
""" GhostNet-0.5x """
model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnet_100(pretrained=False, **kwargs) -> GhostNet:
""" GhostNet-1.0x """
model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnet_130(pretrained=False, **kwargs) -> GhostNet:
""" GhostNet-1.3x """
model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def ghostnetv2_100(pretrained=False, **kwargs) -> GhostNet:
""" GhostNetV2-1.0x """
model = _create_ghostnet('ghostnetv2_100', width=1.0, pretrained=pretrained, version='v2', **kwargs)
return model
@register_model
def ghostnetv2_130(pretrained=False, **kwargs) -> GhostNet:
""" GhostNetV2-1.3x """
model = _create_ghostnet('ghostnetv2_130', width=1.3, pretrained=pretrained, version='v2', **kwargs)
return model
@register_model
def ghostnetv2_160(pretrained=False, **kwargs) -> GhostNet:
""" GhostNetV2-1.6x """
model = _create_ghostnet('ghostnetv2_160', width=1.6, pretrained=pretrained, version='v2', **kwargs)
return model
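# --- Usage sketch (editor's illustration, not part of the original timm module) ---
# GhostNet variants are ordinary timm models; a quick shape check with random weights:
if __name__ == '__main__':
    _model = ghostnet_100(pretrained=False, num_classes=10)
    _out = _model(torch.randn(1, 3, 224, 224))
    print(_out.shape)  # torch.Size([1, 10])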
| pytorch-image-models/timm/models/ghostnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/ghostnet.py",
"repo_id": "pytorch-image-models",
"token_count": 7473
} |
"""
Poolformer from MetaFormer is Actually What You Need for Vision https://arxiv.org/abs/2111.11418
IdentityFormer, RandFormer, PoolFormerV2, ConvFormer, and CAFormer
from MetaFormer Baselines for Vision https://arxiv.org/abs/2210.13452
All implemented models support feature extraction and variable input resolution.
Original implementation by Weihao Yu et al.,
adapted for timm by Fredo Guan and Ross Wightman.
Adapted from https://github.com/sail-sg/metaformer, original copyright below
"""
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, SelectAdaptivePool2d, GroupNorm1, LayerNorm, LayerNorm2d, Mlp, \
use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['MetaFormer']
class Stem(nn.Module):
"""
Stem implemented by a layer of convolution.
Conv2d params constant across all models.
"""
def __init__(
self,
in_channels,
out_channels,
norm_layer=None,
):
super().__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=4,
padding=2
)
self.norm = norm_layer(out_channels) if norm_layer else nn.Identity()
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class Downsampling(nn.Module):
"""
Downsampling implemented by a layer of convolution.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
norm_layer=None,
):
super().__init__()
self.norm = norm_layer(in_channels) if norm_layer else nn.Identity()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding
)
def forward(self, x):
x = self.norm(x)
x = self.conv(x)
return x
class Scale(nn.Module):
"""
Scale vector by element multiplications.
"""
def __init__(self, dim, init_value=1.0, trainable=True, use_nchw=True):
super().__init__()
self.shape = (dim, 1, 1) if use_nchw else (dim,)
self.scale = nn.Parameter(init_value * torch.ones(dim), requires_grad=trainable)
def forward(self, x):
return x * self.scale.view(self.shape)
class SquaredReLU(nn.Module):
"""
Squared ReLU: https://arxiv.org/abs/2109.08668
"""
def __init__(self, inplace=False):
super().__init__()
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return torch.square(self.relu(x))
class StarReLU(nn.Module):
"""
StarReLU: s * relu(x) ** 2 + b
"""
def __init__(
self,
scale_value=1.0,
bias_value=0.0,
scale_learnable=True,
bias_learnable=True,
mode=None,
inplace=False
):
super().__init__()
self.inplace = inplace
self.relu = nn.ReLU(inplace=inplace)
self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable)
self.bias = nn.Parameter(bias_value * torch.ones(1), requires_grad=bias_learnable)
def forward(self, x):
return self.scale * self.relu(x) ** 2 + self.bias
class Attention(nn.Module):
"""
Vanilla self-attention from Transformer: https://arxiv.org/abs/1706.03762.
Modified from timm.
"""
fused_attn: Final[bool]
def __init__(
self,
dim,
head_dim=32,
num_heads=None,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
proj_bias=False,
**kwargs
):
super().__init__()
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.num_heads = num_heads if num_heads else dim // head_dim
if self.num_heads == 0:
self.num_heads = 1
self.attention_dim = self.num_heads * self.head_dim
self.qkv = nn.Linear(dim, self.attention_dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(self.attention_dim, dim, bias=proj_bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# custom norm modules that disable the bias term, since the original model defs
# used a custom norm with a weight term but no bias term.
class GroupNorm1NoBias(GroupNorm1):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNorm2dNoBias(LayerNorm2d):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNormNoBias(nn.LayerNorm):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class SepConv(nn.Module):
r"""
Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381.
"""
def __init__(
self,
dim,
expansion_ratio=2,
act1_layer=StarReLU,
act2_layer=nn.Identity,
bias=False,
kernel_size=7,
padding=3,
**kwargs
):
super().__init__()
mid_channels = int(expansion_ratio * dim)
self.pwconv1 = nn.Conv2d(dim, mid_channels, kernel_size=1, bias=bias)
self.act1 = act1_layer()
self.dwconv = nn.Conv2d(
mid_channels, mid_channels, kernel_size=kernel_size,
padding=padding, groups=mid_channels, bias=bias) # depthwise conv
self.act2 = act2_layer()
self.pwconv2 = nn.Conv2d(mid_channels, dim, kernel_size=1, bias=bias)
def forward(self, x):
x = self.pwconv1(x)
x = self.act1(x)
x = self.dwconv(x)
x = self.act2(x)
x = self.pwconv2(x)
return x
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer: https://arxiv.org/abs/2111.11418
"""
def __init__(self, pool_size=3, **kwargs):
super().__init__()
self.pool = nn.AvgPool2d(
pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, x):
y = self.pool(x)
return y - x
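# Illustrative note (added): the pooling mixer is spatial and parameter-free; it returns
# avg_pool(x) - x so that, with the block's residual connection, the block effectively mixes
# tokens by local averaging. Shape is preserved, e.g.:
#   mixer = Pooling(pool_size=3)
#   y = mixer(torch.randn(2, 64, 14, 14))   # -> (2, 64, 14, 14)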
class MlpHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
dim,
num_classes=1000,
mlp_ratio=4,
act_layer=SquaredReLU,
norm_layer=LayerNorm,
drop_rate=0.,
bias=True
):
super().__init__()
hidden_features = int(mlp_ratio * dim)
self.fc1 = nn.Linear(dim, hidden_features, bias=bias)
self.act = act_layer()
self.norm = norm_layer(hidden_features)
self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
self.head_drop = nn.Dropout(drop_rate)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.norm(x)
x = self.head_drop(x)
x = self.fc2(x)
return x
class MetaFormerBlock(nn.Module):
"""
Implementation of one MetaFormer block.
"""
def __init__(
self,
dim,
token_mixer=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
norm_layer=LayerNorm2d,
proj_drop=0.,
drop_path=0.,
use_nchw=True,
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs
):
super().__init__()
ls_layer = partial(Scale, dim=dim, init_value=layer_scale_init_value, use_nchw=use_nchw)
rs_layer = partial(Scale, dim=dim, init_value=res_scale_init_value, use_nchw=use_nchw)
self.norm1 = norm_layer(dim)
self.token_mixer = token_mixer(dim=dim, proj_drop=proj_drop, **kwargs)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale1 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale1 = rs_layer() if res_scale_init_value is not None else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
dim,
int(4 * dim),
act_layer=mlp_act,
bias=mlp_bias,
drop=proj_drop,
use_conv=use_nchw,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale2 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale2 = rs_layer() if res_scale_init_value is not None else nn.Identity()
def forward(self, x):
x = self.res_scale1(x) + \
self.layer_scale1(
self.drop_path1(
self.token_mixer(self.norm1(x))
)
)
x = self.res_scale2(x) + \
self.layer_scale2(
self.drop_path2(
self.mlp(self.norm2(x))
)
)
return x
class MetaFormerStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth=2,
token_mixer=nn.Identity,
mlp_act=StarReLU,
mlp_bias=False,
downsample_norm=LayerNorm2d,
norm_layer=LayerNorm2d,
proj_drop=0.,
dp_rates=[0.] * 2,
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs,
):
super().__init__()
self.grad_checkpointing = False
self.use_nchw = not issubclass(token_mixer, Attention)
# don't downsample if in_chs and out_chs are the same
self.downsample = nn.Identity() if in_chs == out_chs else Downsampling(
in_chs,
out_chs,
kernel_size=3,
stride=2,
padding=1,
norm_layer=downsample_norm,
)
self.blocks = nn.Sequential(*[MetaFormerBlock(
dim=out_chs,
token_mixer=token_mixer,
mlp_act=mlp_act,
mlp_bias=mlp_bias,
norm_layer=norm_layer,
proj_drop=proj_drop,
drop_path=dp_rates[i],
layer_scale_init_value=layer_scale_init_value,
res_scale_init_value=res_scale_init_value,
use_nchw=self.use_nchw,
**kwargs,
) for i in range(depth)])
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x: Tensor):
x = self.downsample(x)
B, C, H, W = x.shape
if not self.use_nchw:
x = x.reshape(B, C, -1).transpose(1, 2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
if not self.use_nchw:
x = x.transpose(1, 2).reshape(B, C, H, W)
return x
class MetaFormer(nn.Module):
r""" MetaFormer
A PyTorch impl of : `MetaFormer Baselines for Vision` -
https://arxiv.org/abs/2210.13452
Args:
in_chans (int): Number of input image channels.
num_classes (int): Number of classes for classification head.
global_pool: Pooling for classifier head.
depths (list or tuple): Number of blocks at each stage.
dims (list or tuple): Feature dimension at each stage.
token_mixers (list, tuple or token_fcn): Token mixer for each stage.
mlp_act: Activation layer for MLP.
mlp_bias (boolean): Enable or disable mlp bias term.
drop_path_rate (float): Stochastic depth rate.
drop_rate (float): Dropout rate.
layer_scale_init_values (list, tuple, float or None): Init value for Layer Scale.
None means not use the layer scale. Form: https://arxiv.org/abs/2103.17239.
res_scale_init_values (list, tuple, float or None): Init value for res Scale on residual connections.
None means not use the res scale. From: https://arxiv.org/abs/2110.09456.
downsample_norm (nn.Module): Norm layer used in stem and downsampling layers.
norm_layers (list, tuple or norm_fcn): Norm layers for each stage.
output_norm: Norm layer before classifier head.
use_mlp_head: Use MLP classification head.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
depths=(2, 2, 6, 2),
dims=(64, 128, 320, 512),
token_mixers=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
drop_path_rate=0.,
proj_drop_rate=0.,
drop_rate=0.0,
layer_scale_init_values=None,
res_scale_init_values=(None, None, 1.0, 1.0),
downsample_norm=LayerNorm2dNoBias,
norm_layers=LayerNorm2dNoBias,
output_norm=LayerNorm2d,
use_mlp_head=True,
**kwargs,
):
super().__init__()
self.num_classes = num_classes
self.num_features = dims[-1]
self.drop_rate = drop_rate
self.use_mlp_head = use_mlp_head
self.num_stages = len(depths)
# convert everything to lists if they aren't indexable
if not isinstance(depths, (list, tuple)):
depths = [depths] # it means the model has only one stage
if not isinstance(dims, (list, tuple)):
dims = [dims]
if not isinstance(token_mixers, (list, tuple)):
token_mixers = [token_mixers] * self.num_stages
if not isinstance(norm_layers, (list, tuple)):
norm_layers = [norm_layers] * self.num_stages
if not isinstance(layer_scale_init_values, (list, tuple)):
layer_scale_init_values = [layer_scale_init_values] * self.num_stages
if not isinstance(res_scale_init_values, (list, tuple)):
res_scale_init_values = [res_scale_init_values] * self.num_stages
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(
in_chans,
dims[0],
norm_layer=downsample_norm
)
stages = []
prev_dim = dims[0]
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
for i in range(self.num_stages):
stages += [MetaFormerStage(
prev_dim,
dims[i],
depth=depths[i],
token_mixer=token_mixers[i],
mlp_act=mlp_act,
mlp_bias=mlp_bias,
proj_drop=proj_drop_rate,
dp_rates=dp_rates[i],
layer_scale_init_value=layer_scale_init_values[i],
res_scale_init_value=res_scale_init_values[i],
downsample_norm=downsample_norm,
norm_layer=norm_layers[i],
**kwargs,
)]
prev_dim = dims[i]
self.feature_info += [dict(num_chs=dims[i], reduction=2**(i+2), module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# if using MlpHead, dropout is handled by MlpHead
if num_classes > 0:
if self.use_mlp_head:
# FIXME not actually returning mlp hidden state right now as pre-logits.
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
self.head_hidden_size = self.num_features
else:
final = nn.Linear(self.num_features, num_classes)
self.head_hidden_size = self.num_features
else:
final = nn.Identity()
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', output_norm(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(drop_rate) if self.use_mlp_head else nn.Identity()),
('fc', final)
]))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for stage in self.stages:
stage.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity()
if num_classes > 0:
if self.use_mlp_head:
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
else:
final = nn.Linear(self.num_features, num_classes)
else:
final = nn.Identity()
self.head.fc = final
def forward_head(self, x: Tensor, pre_logits: bool = False):
# NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :(
x = self.head.global_pool(x)
x = self.head.norm(x)
x = self.head.flatten(x)
x = self.head.drop(x)
return x if pre_logits else self.head.fc(x)
def forward_features(self, x: Tensor):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward(self, x: Tensor):
x = self.forward_features(x)
x = self.forward_head(x)
return x
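# Usage sketch (added for illustration): a MetaFormer can be instantiated directly with the
# constructor defaults shown above, e.g. a pooling-mixer model:
#   model = MetaFormer(depths=(2, 2, 6, 2), dims=(64, 128, 320, 512), token_mixers=Pooling)
#   logits = model(torch.randn(1, 3, 224, 224))   # -> (1, 1000)
# The _create_metaformer helper below wraps such configurations with pretrained weight and
# feature extraction handling for the registered variants.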
# this works but it's long and breaks backwards compatibility with weights from the poolformer-only impl
def checkpoint_filter_fn(state_dict, model):
if 'stem.conv.weight' in state_dict:
return state_dict
import re
out_dict = {}
is_poolformerv1 = 'network.0.0.mlp.fc1.weight' in state_dict
model_state_dict = model.state_dict()
for k, v in state_dict.items():
if is_poolformerv1:
k = re.sub(r'layer_scale_([0-9]+)', r'layer_scale\1.scale', k)
k = k.replace('network.1', 'downsample_layers.1')
k = k.replace('network.3', 'downsample_layers.2')
k = k.replace('network.5', 'downsample_layers.3')
k = k.replace('network.2', 'network.1')
k = k.replace('network.4', 'network.2')
k = k.replace('network.6', 'network.3')
k = k.replace('network', 'stages')
k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k)
k = k.replace('downsample.proj', 'downsample.conv')
k = k.replace('patch_embed.proj', 'patch_embed.conv')
k = re.sub(r'([0-9]+).([0-9]+)', r'\1.blocks.\2', k)
k = k.replace('stages.0.downsample', 'patch_embed')
k = k.replace('patch_embed', 'stem')
k = k.replace('post_norm', 'norm')
k = k.replace('pre_norm', 'norm')
k = re.sub(r'^head', 'head.fc', k)
k = re.sub(r'^norm', 'head.norm', k)
        if v.shape != model_state_dict[k].shape and v.numel() == model_state_dict[k].numel():
v = v.reshape(model_state_dict[k].shape)
out_dict[k] = v
return out_dict
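# Example of the remapping above (illustrative): a PoolFormer-v1 key such as
# 'network.0.0.mlp.fc1.weight' becomes 'stages.0.blocks.0.mlp.fc1.weight', and a downsample key
# such as 'network.1.proj.weight' becomes 'stages.1.downsample.conv.weight'.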
def _create_metaformer(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (2, 2, 6, 2))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
MetaFormer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 1.0, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'classifier': 'head.fc', 'first_conv': 'stem.conv',
**kwargs
}
default_cfgs = generate_default_cfgs({
'poolformer_s12.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s24.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformer_m48.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformerv2_s12.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s24.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m48.sail_in1k': _cfg(hf_hub_id='timm/'),
'convformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
})
@register_model
def poolformer_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m48', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m48', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_b36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_b36', pretrained=pretrained, **model_kwargs)
| pytorch-image-models/timm/models/metaformer.py/0 | {
"file_path": "pytorch-image-models/timm/models/metaformer.py",
"repo_id": "pytorch-image-models",
"token_count": 17650
} |
""" RepViT
Paper: `RepViT: Revisiting Mobile CNN From ViT Perspective`
- https://arxiv.org/abs/2307.09283
@misc{wang2023repvit,
title={RepViT: Revisiting Mobile CNN From ViT Perspective},
author={Ao Wang and Hui Chen and Zijia Lin and Hengjun Pu and Guiguang Ding},
year={2023},
eprint={2307.09283},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
Adapted from official impl at https://github.com/jameslahm/RepViT
"""
__all__ = ['RepVit']
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SqueezeExcite, trunc_normal_, to_ntuple, to_2tuple
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class ConvNorm(nn.Sequential):
def __init__(self, in_dim, out_dim, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.add_module('c', nn.Conv2d(in_dim, out_dim, ks, stride, pad, dilation, groups, bias=False))
self.add_module('bn', nn.BatchNorm2d(out_dim))
nn.init.constant_(self.bn.weight, bn_weight_init)
nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
c, bn = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
m = nn.Conv2d(
w.size(1) * self.c.groups,
w.size(0),
w.shape[2:],
stride=self.c.stride,
padding=self.c.padding,
dilation=self.c.dilation,
groups=self.c.groups,
device=c.weight.device,
)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class NormLinear(nn.Sequential):
def __init__(self, in_dim, out_dim, bias=True, std=0.02):
super().__init__()
self.add_module('bn', nn.BatchNorm1d(in_dim))
self.add_module('l', nn.Linear(in_dim, out_dim, bias=bias))
trunc_normal_(self.l.weight, std=std)
if bias:
nn.init.constant_(self.l.bias, 0)
@torch.no_grad()
def fuse(self):
bn, l = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[None, :]
if l.bias is None:
b = b @ self.l.weight.T
else:
b = (l.weight @ b[:, None]).view(-1) + self.l.bias
m = nn.Linear(w.size(1), w.size(0), device=l.weight.device)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class RepVggDw(nn.Module):
def __init__(self, ed, kernel_size, legacy=False):
super().__init__()
self.conv = ConvNorm(ed, ed, kernel_size, 1, (kernel_size - 1) // 2, groups=ed)
if legacy:
self.conv1 = ConvNorm(ed, ed, 1, 1, 0, groups=ed)
# Make torchscript happy.
self.bn = nn.Identity()
else:
self.conv1 = nn.Conv2d(ed, ed, 1, 1, 0, groups=ed)
self.bn = nn.BatchNorm2d(ed)
self.dim = ed
self.legacy = legacy
def forward(self, x):
return self.bn(self.conv(x) + self.conv1(x) + x)
@torch.no_grad()
def fuse(self):
conv = self.conv.fuse()
if self.legacy:
conv1 = self.conv1.fuse()
else:
conv1 = self.conv1
conv_w = conv.weight
conv_b = conv.bias
conv1_w = conv1.weight
conv1_b = conv1.bias
conv1_w = nn.functional.pad(conv1_w, [1, 1, 1, 1])
identity = nn.functional.pad(
torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1, 1, 1, 1]
)
final_conv_w = conv_w + conv1_w + identity
final_conv_b = conv_b + conv1_b
conv.weight.data.copy_(final_conv_w)
conv.bias.data.copy_(final_conv_b)
if not self.legacy:
bn = self.bn
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = conv.weight * w[:, None, None, None]
b = bn.bias + (conv.bias - bn.running_mean) * bn.weight / (bn.running_var + bn.eps) ** 0.5
conv.weight.data.copy_(w)
conv.bias.data.copy_(b)
return conv
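# Reparameterization note: RepVggDw.fuse() folds the three parallel branches (3x3 depthwise
# ConvNorm, 1x1 depthwise conv, identity) into a single 3x3 depthwise conv by zero-padding the
# 1x1 and identity kernels to 3x3 and summing the weights, then folding the trailing BatchNorm
# (non-legacy mode) into the fused weight and bias.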
class RepVitMlp(nn.Module):
def __init__(self, in_dim, hidden_dim, act_layer):
super().__init__()
self.conv1 = ConvNorm(in_dim, hidden_dim, 1, 1, 0)
self.act = act_layer()
self.conv2 = ConvNorm(hidden_dim, in_dim, 1, 1, 0, bn_weight_init=0)
def forward(self, x):
return self.conv2(self.act(self.conv1(x)))
class RepViTBlock(nn.Module):
def __init__(self, in_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy=False):
super(RepViTBlock, self).__init__()
self.token_mixer = RepVggDw(in_dim, kernel_size, legacy)
self.se = SqueezeExcite(in_dim, 0.25) if use_se else nn.Identity()
self.channel_mixer = RepVitMlp(in_dim, in_dim * mlp_ratio, act_layer)
def forward(self, x):
x = self.token_mixer(x)
x = self.se(x)
identity = x
x = self.channel_mixer(x)
return identity + x
class RepVitStem(nn.Module):
def __init__(self, in_chs, out_chs, act_layer):
super().__init__()
self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1)
self.act1 = act_layer()
self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1)
self.stride = 4
def forward(self, x):
return self.conv2(self.act1(self.conv1(x)))
class RepVitDownsample(nn.Module):
def __init__(self, in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy=False):
super().__init__()
self.pre_block = RepViTBlock(in_dim, mlp_ratio, kernel_size, use_se=False, act_layer=act_layer, legacy=legacy)
self.spatial_downsample = ConvNorm(in_dim, in_dim, kernel_size, 2, (kernel_size - 1) // 2, groups=in_dim)
self.channel_downsample = ConvNorm(in_dim, out_dim, 1, 1)
self.ffn = RepVitMlp(out_dim, out_dim * mlp_ratio, act_layer)
def forward(self, x):
x = self.pre_block(x)
x = self.spatial_downsample(x)
x = self.channel_downsample(x)
identity = x
x = self.ffn(x)
return x + identity
class RepVitClassifier(nn.Module):
def __init__(self, dim, num_classes, distillation=False, drop=0.0):
super().__init__()
self.head_drop = nn.Dropout(drop)
self.head = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity()
self.distillation = distillation
self.distilled_training = False
self.num_classes = num_classes
if distillation:
self.head_dist = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x):
x = self.head_drop(x)
if self.distillation:
x1, x2 = self.head(x), self.head_dist(x)
if self.training and self.distilled_training and not torch.jit.is_scripting():
return x1, x2
else:
return (x1 + x2) / 2
else:
x = self.head(x)
return x
@torch.no_grad()
def fuse(self):
        if self.num_classes <= 0:
return nn.Identity()
head = self.head.fuse()
if self.distillation:
head_dist = self.head_dist.fuse()
head.weight += head_dist.weight
head.bias += head_dist.bias
head.weight /= 2
head.bias /= 2
return head
else:
return head
class RepVitStage(nn.Module):
def __init__(self, in_dim, out_dim, depth, mlp_ratio, act_layer, kernel_size=3, downsample=True, legacy=False):
super().__init__()
if downsample:
self.downsample = RepVitDownsample(in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy)
else:
assert in_dim == out_dim
self.downsample = nn.Identity()
blocks = []
use_se = True
for _ in range(depth):
blocks.append(RepViTBlock(out_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy))
use_se = not use_se
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class RepVit(nn.Module):
def __init__(
self,
in_chans=3,
img_size=224,
embed_dim=(48,),
depth=(2,),
mlp_ratio=2,
global_pool='avg',
kernel_size=3,
num_classes=1000,
act_layer=nn.GELU,
distillation=True,
drop_rate=0.0,
legacy=False,
):
super(RepVit, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.embed_dim = embed_dim
self.num_classes = num_classes
in_dim = embed_dim[0]
self.stem = RepVitStem(in_chans, in_dim, act_layer)
stride = self.stem.stride
resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))])
num_stages = len(embed_dim)
mlp_ratios = to_ntuple(num_stages)(mlp_ratio)
self.feature_info = []
stages = []
for i in range(num_stages):
            downsample = i != 0
stages.append(
RepVitStage(
in_dim,
embed_dim[i],
depth[i],
mlp_ratio=mlp_ratios[i],
act_layer=act_layer,
kernel_size=kernel_size,
downsample=downsample,
legacy=legacy,
)
)
stage_stride = 2 if downsample else 1
stride *= stage_stride
resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution])
self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')]
in_dim = embed_dim[i]
self.stages = nn.Sequential(*stages)
self.num_features = self.head_hidden_size = embed_dim[-1]
self.head_drop = nn.Dropout(drop_rate)
self.head = RepVitClassifier(embed_dim[-1], num_classes, distillation)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem=r'^stem', blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]) # stem and embed
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, distillation: bool = False):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = RepVitClassifier(self.embed_dim[-1], num_classes, distillation)
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.head.distilled_training = enable
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean((2, 3), keepdim=False)
x = self.head_drop(x)
if pre_logits:
return x
return self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
@torch.no_grad()
def fuse(self):
def fuse_children(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
fused = child.fuse()
setattr(net, child_name, fused)
fuse_children(fused)
else:
fuse_children(child)
fuse_children(self)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': (7, 7),
'crop_pct': 0.95,
'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1.c',
'classifier': ('head.head.l', 'head.head_dist.l'),
**kwargs,
}
default_cfgs = generate_default_cfgs(
{
'repvit_m1.dist_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m2.dist_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m3.dist_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m0_9.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m0_9.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_0.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_0.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_1.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_1.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_5.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m1_5.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m2_3.dist_300e_in1k': _cfg(
hf_hub_id='timm/',
),
'repvit_m2_3.dist_450e_in1k': _cfg(
hf_hub_id='timm/',
),
}
)
def _create_repvit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
RepVit,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
@register_model
def repvit_m1(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1 model
"""
model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2), legacy=True)
return _create_repvit('repvit_m1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m2(pretrained=False, **kwargs):
"""
Constructs a RepViT-M2 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2), legacy=True)
return _create_repvit('repvit_m2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m3(pretrained=False, **kwargs):
"""
Constructs a RepViT-M3 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 18, 2), legacy=True)
return _create_repvit('repvit_m3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m0_9(pretrained=False, **kwargs):
"""
Constructs a RepViT-M0.9 model
"""
model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2))
return _create_repvit('repvit_m0_9', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m1_0(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1.0 model
"""
model_args = dict(embed_dim=(56, 112, 224, 448), depth=(2, 2, 14, 2))
return _create_repvit('repvit_m1_0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m1_1(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1.1 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2))
return _create_repvit('repvit_m1_1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m1_5(pretrained=False, **kwargs):
"""
Constructs a RepViT-M1.5 model
"""
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 24, 4))
return _create_repvit('repvit_m1_5', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def repvit_m2_3(pretrained=False, **kwargs):
"""
Constructs a RepViT-M2.3 model
"""
model_args = dict(embed_dim=(80, 160, 320, 640), depth=(6, 6, 34, 2))
return _create_repvit('repvit_m2_3', pretrained=pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/repvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/repvit.py",
"repo_id": "pytorch-image-models",
"token_count": 8378
} |
""" Twins
A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers`
- https://arxiv.org/pdf/2104.13840.pdf
Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below
"""
# --------------------------------------------------------
# Twins
# Copyright (c) 2021 Meituan
# Licensed under The Apache 2.0 License [see LICENSE for details]
# Written by Xinjie Li, Xiangxiang Chu
# --------------------------------------------------------
import math
from functools import partial
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, to_2tuple, trunc_normal_, use_fused_attn
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Attention
__all__ = ['Twins'] # model_registry will add each entrypoint fn to this
Size_ = Tuple[int, int]
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class LocallyGroupedAttn(nn.Module):
""" LSA: self attention within a group
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1):
assert ws != 1
super(LocallyGroupedAttn, self).__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=True)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.ws = ws
def forward(self, x, size: Size_):
        # Two equivalent implementations exist for this function: zero padding or masking. We observe
        # no meaningful accuracy difference between them. The padding version below is used because it
        # is simpler; the (commented-out) masking version handles the padded window borders more precisely.
B, N, C = x.shape
H, W = size
x = x.view(B, H, W, C)
pad_l = pad_t = 0
pad_r = (self.ws - W % self.ws) % self.ws
pad_b = (self.ws - H % self.ws) % self.ws
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
_h, _w = Hp // self.ws, Wp // self.ws
x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)
qkv = self.qkv(x).reshape(
B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
x = x.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# def forward_mask(self, x, size: Size_):
# B, N, C = x.shape
# H, W = size
# x = x.view(B, H, W, C)
# pad_l = pad_t = 0
# pad_r = (self.ws - W % self.ws) % self.ws
# pad_b = (self.ws - H % self.ws) % self.ws
# x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
# _, Hp, Wp, _ = x.shape
# _h, _w = Hp // self.ws, Wp // self.ws
# mask = torch.zeros((1, Hp, Wp), device=x.device)
# mask[:, -pad_b:, :].fill_(1)
# mask[:, :, -pad_r:].fill_(1)
#
# x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C
# mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws)
# attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws
# attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0))
# qkv = self.qkv(x).reshape(
# B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
# # n_h, B, _w*_h, nhead, ws*ws, dim
# q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head
# attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws
# attn = attn + attn_mask.unsqueeze(2)
# attn = attn.softmax(dim=-1)
# attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head
# attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
# x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
# if pad_r > 0 or pad_b > 0:
# x = x[:, :H, :W, :].contiguous()
# x = x.reshape(B, N, C)
# x = self.proj(x)
# x = self.proj_drop(x)
# return x
class GlobalSubSampleAttn(nn.Module):
""" GSA: using a key to summarize the information for a group to be efficient.
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.q = nn.Linear(dim, dim, bias=True)
self.kv = nn.Linear(dim, dim * 2, bias=True)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
else:
self.sr = None
self.norm = None
def forward(self, x, size: Size_):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr is not None:
x = x.permute(0, 2, 1).reshape(B, C, *size)
x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
x = self.norm(x)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
sr_ratio=1,
ws=None,
):
super().__init__()
self.norm1 = norm_layer(dim)
if ws is None:
self.attn = Attention(dim, num_heads, False, None, attn_drop, proj_drop)
elif ws == 1:
self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, proj_drop, sr_ratio)
else:
self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, proj_drop, ws)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, size: Size_):
x = x + self.drop_path1(self.attn(self.norm1(x), size))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
return x
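# Note: Block selects its token mixer via `ws`. ws=None uses plain ViT Attention, ws=1 uses
# GlobalSubSampleAttn (GSA), and ws>1 uses LocallyGroupedAttn (LSA) with window size `ws`.
# Twins-SVT alternates LSA and GSA blocks within a stage, while Twins-PCPVT (wss=None) uses
# GSA in every block.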
class PosConv(nn.Module):
# PEG from https://arxiv.org/abs/2102.10882
def __init__(self, in_chans, embed_dim=768, stride=1):
super(PosConv, self).__init__()
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim),
)
self.stride = stride
def forward(self, x, size: Size_):
B, N, C = x.shape
cnn_feat_token = x.transpose(1, 2).view(B, C, *size)
x = self.proj(cnn_feat_token)
if self.stride == 1:
x += cnn_feat_token
x = x.flatten(2).transpose(1, 2)
return x
def no_weight_decay(self):
return ['proj.%d.weight' % i for i in range(4)]
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
f"img_size {img_size} should be divided by patch_size {patch_size}."
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x) -> Tuple[torch.Tensor, Size_]:
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
out_size = (H // self.patch_size[0], W // self.patch_size[1])
return x, out_size
class Twins(nn.Module):
""" Twins Vision Transformer (Revisiting Spatial Attention)
Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git
"""
def __init__(
self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
global_pool='avg',
embed_dims=(64, 128, 256, 512),
num_heads=(1, 2, 4, 8),
mlp_ratios=(4, 4, 4, 4),
depths=(3, 4, 6, 3),
sr_ratios=(8, 4, 2, 1),
wss=None,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
block_cls=Block,
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.depths = depths
self.embed_dims = embed_dims
self.num_features = self.head_hidden_size = embed_dims[-1]
self.grad_checkpointing = False
img_size = to_2tuple(img_size)
prev_chs = in_chans
self.patch_embeds = nn.ModuleList()
self.pos_drops = nn.ModuleList()
for i in range(len(depths)):
self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))
self.pos_drops.append(nn.Dropout(p=pos_drop_rate))
prev_chs = embed_dims[i]
img_size = tuple(t // patch_size for t in img_size)
patch_size = 2
self.blocks = nn.ModuleList()
self.feature_info = []
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
for k in range(len(depths)):
_block = nn.ModuleList([block_cls(
dim=embed_dims[k],
num_heads=num_heads[k],
mlp_ratio=mlp_ratios[k],
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[cur + i],
norm_layer=norm_layer,
sr_ratio=sr_ratios[k],
ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])],
)
self.blocks.append(_block)
self.feature_info += [dict(module=f'block.{k}', num_chs=embed_dims[k], reduction=2**(2+k))]
cur += depths[k]
self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])
self.norm = norm_layer(self.num_features)
# classification head
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# init weights
self.apply(self._init_weights)
@torch.jit.ignore
def no_weight_decay(self):
return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()])
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embeds.0', # stem and embed
blocks=[
(r'^(?:blocks|patch_embeds|pos_block)\.(\d+)', None),
('^norm', (99999,))
] if coarse else [
(r'^blocks\.(\d+)\.(\d+)', None),
(r'^(?:patch_embeds|pos_block)\.(\d+)', (0,)),
(r'^norm', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            A list of intermediate feature maps if `intermediates_only` is True, otherwise a tuple
            of (final normalized features, list of intermediate feature maps).
        """
assert output_fmt == 'NCHW', 'Output shape for Twins must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# FIXME slice block/pos_block if < max
# forward pass
B, _, height, width = x.shape
for i, (embed, drop, blocks, pos_blk) in enumerate(zip(
self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)
):
x, size = embed(x)
x = drop(x)
for j, blk in enumerate(blocks):
x = blk(x, size)
if j == 0:
x = pos_blk(x, size) # PEG here
if i < len(self.depths) - 1:
x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()
if i in take_indices:
intermediates.append(x)
else:
if i in take_indices:
# only last feature can be normed
x_feat = self.norm(x) if norm else x
intermediates.append(x_feat.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous())
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# FIXME add block pruning
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
B = x.shape[0]
for i, (embed, drop, blocks, pos_blk) in enumerate(
zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)):
x, size = embed(x)
x = drop(x)
for j, blk in enumerate(blocks):
x = blk(x, size)
if j == 0:
x = pos_blk(x, size) # PEG here
if i < len(self.depths) - 1:
x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=1)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
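# Usage sketch (illustrative only; any registered Twins entrypoint works the same way):
#
#   model = twins_svt_small(pretrained=False)
#   feats = model.forward_intermediates(
#       torch.randn(2, 3, 224, 224), indices=(0, 1, 2, 3), intermediates_only=True)
#   # feats is a list of four NCHW feature maps with strides 4, 8, 16 and 32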
def _create_twins(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 4)
model = build_model_with_cfg(
Twins, variant, pretrained,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embeds.0.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'twins_pcpvt_small.in1k': _cfg(hf_hub_id='timm/'),
'twins_pcpvt_base.in1k': _cfg(hf_hub_id='timm/'),
'twins_pcpvt_large.in1k': _cfg(hf_hub_id='timm/'),
'twins_svt_small.in1k': _cfg(hf_hub_id='timm/'),
'twins_svt_base.in1k': _cfg(hf_hub_id='timm/'),
'twins_svt_large.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def twins_pcpvt_small(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_pcpvt_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_pcpvt_base(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_pcpvt_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_pcpvt_large(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_pcpvt_large', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_svt_small(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_svt_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_svt_base(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_svt_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def twins_svt_large(pretrained=False, **kwargs) -> Twins:
model_args = dict(
patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4],
depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
return _create_twins('twins_svt_large', pretrained=pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/twins.py/0 | {
"file_path": "pytorch-image-models/timm/models/twins.py",
"repo_id": "pytorch-image-models",
"token_count": 11134
} |
from typing import Any, Dict, Iterable, Union, Protocol, Type
try:
from typing import TypeAlias, TypeVar
except ImportError:
from typing_extensions import TypeAlias, TypeVar
import torch
import torch.optim
try:
from torch.optim.optimizer import ParamsT
except (ImportError, TypeError):
ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]
OptimType = Type[torch.optim.Optimizer]
class OptimizerCallable(Protocol):
"""Protocol for optimizer constructor signatures."""
def __call__(self, params: ParamsT, **kwargs) -> torch.optim.Optimizer: ...
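# Note: any callable mapping a parameter iterable to an Optimizer satisfies OptimizerCallable,
# e.g. `functools.partial(torch.optim.AdamW, lr=1e-3)`.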
__all__ = ['ParamsT', 'OptimType', 'OptimizerCallable'] | pytorch-image-models/timm/optim/_types.py/0 | {
"file_path": "pytorch-image-models/timm/optim/_types.py",
"repo_id": "pytorch-image-models",
"token_count": 217
} |
""" PyTorch MARS Optimizer
Code simplified from https://github.com/AGI-Arena/MARS
Paper: MARS: Unleashing the Power of Variance Reduction for Training Large Models - https://arxiv.org/abs/2411.10438
@article{yuan2024mars,
title={MARS: Unleashing the Power of Variance Reduction for Training Large Models},
author={Yuan, Huizhuo and Liu, Yifeng and Wu, Shuang and Zhou, Xun and Gu, Quanquan},
journal={arXiv preprint arXiv:2411.10438},
year={2024}
}
"""
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: Apache-2.0
import math
from typing import Optional, Tuple
import torch
from torch.optim.optimizer import Optimizer
from ._types import ParamsT
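# Update sketch (mirrors _mars_single_tensor_step below): the variance-reduced gradient is
#   c_t = g_t + gamma * (beta1 / (1 - beta1)) * (g_t - g_{t-1})
# clipped to unit L2 norm when ||c_t|| > 1, then used in an AdamW-style (or Lion-style) update.
# 1-d parameters fall back to a plain AdamW update unless optimize_1d=True.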
def _mars_single_tensor_step(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
exp_avg_sq: torch.Tensor,
lr: float,
weight_decay: float,
beta1: float,
beta2: float,
last_grad: torch.Tensor,
eps: float,
step: int,
gamma: float,
mars_type: str,
is_grad_2d: bool,
optimize_1d: bool,
lr_1d_factor: bool,
betas_1d: Tuple[float, float],
caution: bool,
):
# optimize_1d ==> use MARS for 1d param, else use AdamW
if optimize_1d or is_grad_2d:
one_minus_beta1 = 1. - beta1
if step == 1:
# this is a timm addition, making first step more consistent when no grad history, otherwise tests fail
c_t = grad
else:
c_t = (grad - last_grad).mul_(gamma * (beta1 / one_minus_beta1)).add_(grad)
c_t_norm = torch.norm(c_t)
if c_t_norm > 1.:
c_t = c_t / c_t_norm
exp_avg.mul_(beta1).add_(c_t, alpha=one_minus_beta1)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg = exp_avg * mask
if mars_type == "adamw":
exp_avg_sq.mul_(beta2).addcmul_(c_t, c_t, value=1. - beta2)
bias_correction1 = 1.0 - beta1 ** step
bias_correction2 = 1.0 - beta2 ** step
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
update = p * weight_decay + (exp_avg / bias_correction1).div_(denom)
elif mars_type == "lion":
update = p * weight_decay + exp_avg.sign()
else:
assert False
p.add_(update, alpha=-lr)
else:
beta1_1d, beta2_1d = betas_1d
exp_avg.mul_(beta1_1d).add_(grad, alpha=1. - beta1_1d)
exp_avg_sq.mul_(beta2_1d).addcmul_(grad, grad, value=1. - beta2_1d)
bias_correction1 = 1.0 - beta1_1d ** step
bias_correction2 = 1.0 - beta2_1d ** step
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
if caution:
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg = exp_avg * mask
update = p * weight_decay + (exp_avg / bias_correction1).div_(denom)
p.add_(update, alpha=-(lr * lr_1d_factor))
return exp_avg, exp_avg_sq
class Mars(Optimizer):
""" MARS Optimizer
Paper: MARS: Unleashing the Power of Variance Reduction for Training Large Models
https://arxiv.org/abs/2411.10438
"""
def __init__(
self,
params: ParamsT,
lr: float = 3e-3,
betas: Tuple[float, float] = (0.9, 0.99),
eps: float = 1e-8,
weight_decay: float = 0.,
gamma: float = 0.025,
mars_type: str = "adamw",
optimize_1d: bool = False,
lr_1d_factor: float = 1.0,
betas_1d: Optional[Tuple[float, float]] = None,
caution: bool = False
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
assert mars_type in ["adamw", "lion"], "MARS type not supported"
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
mars_type=mars_type,
gamma=gamma,
optimize_1d=optimize_1d,
lr_1d_factor=lr_1d_factor,
betas_1d=betas_1d or betas,
caution=caution,
)
super(Mars, self).__init__(params, defaults)
def __setstate__(self, state):
super(Mars, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('caution', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
                    raise RuntimeError('Mars does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) <= 1:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Last Gradient
state['last_grad'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
state['step'] += 1
step = state['step']
exp_avg = state['exp_avg']
exp_avg_sq = state['exp_avg_sq']
last_grad = state['last_grad']
lr = group['lr']
wd = group['weight_decay']
beta1, beta2 = group['betas']
is_grad_2d = grad.ndim >= 2
# FIXME add multi-tensor (if usage warrants), make more standard
_mars_single_tensor_step(
p,
grad,
exp_avg,
exp_avg_sq,
lr,
wd,
beta1,
beta2,
last_grad,
group['eps'],
step,
group['gamma'],
mars_type=group['mars_type'],
is_grad_2d=is_grad_2d,
optimize_1d=group['optimize_1d'],
lr_1d_factor=group['lr_1d_factor'],
betas_1d=group['betas_1d'],
caution=group['caution'],
)
state['last_grad'] = grad
return loss
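# Usage sketch (illustrative only; assumes an existing model, criterion and data):
#
#   optimizer = Mars(model.parameters(), lr=3e-3, weight_decay=0.1)
#   loss = criterion(model(x), y)
#   loss.backward()
#   optimizer.step()
#   optimizer.zero_grad()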
| pytorch-image-models/timm/optim/mars.py/0 | {
"file_path": "pytorch-image-models/timm/optim/mars.py",
"repo_id": "pytorch-image-models",
"token_count": 3950
} |
""" Scheduler Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import List, Optional, Union
from torch.optim import Optimizer
from .cosine_lr import CosineLRScheduler
from .multistep_lr import MultiStepLRScheduler
from .plateau_lr import PlateauLRScheduler
from .poly_lr import PolyLRScheduler
from .step_lr import StepLRScheduler
from .tanh_lr import TanhLRScheduler
def scheduler_kwargs(cfg, decreasing_metric: Optional[bool] = None):
""" cfg/argparse to kwargs helper
Convert scheduler args in argparse args or cfg (.dot) like object to keyword args.
"""
eval_metric = getattr(cfg, 'eval_metric', 'top1')
if decreasing_metric is not None:
plateau_mode = 'min' if decreasing_metric else 'max'
else:
plateau_mode = 'min' if 'loss' in eval_metric else 'max'
kwargs = dict(
sched=cfg.sched,
num_epochs=getattr(cfg, 'epochs', 100),
decay_epochs=getattr(cfg, 'decay_epochs', 30),
decay_milestones=getattr(cfg, 'decay_milestones', [30, 60]),
warmup_epochs=getattr(cfg, 'warmup_epochs', 5),
cooldown_epochs=getattr(cfg, 'cooldown_epochs', 0),
patience_epochs=getattr(cfg, 'patience_epochs', 10),
decay_rate=getattr(cfg, 'decay_rate', 0.1),
min_lr=getattr(cfg, 'min_lr', 0.),
warmup_lr=getattr(cfg, 'warmup_lr', 1e-5),
warmup_prefix=getattr(cfg, 'warmup_prefix', False),
noise=getattr(cfg, 'lr_noise', None),
noise_pct=getattr(cfg, 'lr_noise_pct', 0.67),
noise_std=getattr(cfg, 'lr_noise_std', 1.),
noise_seed=getattr(cfg, 'seed', 42),
cycle_mul=getattr(cfg, 'lr_cycle_mul', 1.),
cycle_decay=getattr(cfg, 'lr_cycle_decay', 0.1),
cycle_limit=getattr(cfg, 'lr_cycle_limit', 1),
k_decay=getattr(cfg, 'lr_k_decay', 1.0),
plateau_mode=plateau_mode,
step_on_epochs=not getattr(cfg, 'sched_on_updates', False),
)
return kwargs
def create_scheduler(
args,
optimizer: Optimizer,
updates_per_epoch: int = 0,
):
return create_scheduler_v2(
optimizer=optimizer,
**scheduler_kwargs(args),
updates_per_epoch=updates_per_epoch,
)
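# Usage sketch (illustrative only; assumes an existing model and training loop):
#
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
#   scheduler, num_epochs = create_scheduler_v2(
#       optimizer, sched='cosine', num_epochs=100, warmup_epochs=5, warmup_lr=1e-5, min_lr=1e-5)
#   for epoch in range(num_epochs):
#       ...                         # train one epoch
#       scheduler.step(epoch + 1)   # per-epoch stepping (step_on_epochs=True)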
def create_scheduler_v2(
optimizer: Optimizer,
sched: str = 'cosine',
num_epochs: int = 300,
decay_epochs: int = 90,
decay_milestones: List[int] = (90, 180, 270),
cooldown_epochs: int = 0,
patience_epochs: int = 10,
decay_rate: float = 0.1,
min_lr: float = 0,
warmup_lr: float = 1e-5,
warmup_epochs: int = 0,
warmup_prefix: bool = False,
noise: Union[float, List[float]] = None,
noise_pct: float = 0.67,
noise_std: float = 1.,
noise_seed: int = 42,
cycle_mul: float = 1.,
cycle_decay: float = 0.1,
cycle_limit: int = 1,
k_decay: float = 1.0,
plateau_mode: str = 'max',
step_on_epochs: bool = True,
updates_per_epoch: int = 0,
):
t_initial = num_epochs
warmup_t = warmup_epochs
decay_t = decay_epochs
cooldown_t = cooldown_epochs
if not step_on_epochs:
assert updates_per_epoch > 0, 'updates_per_epoch must be set to number of dataloader batches'
t_initial = t_initial * updates_per_epoch
warmup_t = warmup_t * updates_per_epoch
decay_t = decay_t * updates_per_epoch
decay_milestones = [d * updates_per_epoch for d in decay_milestones]
cooldown_t = cooldown_t * updates_per_epoch
# warmup args
warmup_args = dict(
warmup_lr_init=warmup_lr,
warmup_t=warmup_t,
warmup_prefix=warmup_prefix,
)
# setup noise args for supporting schedulers
if noise is not None:
if isinstance(noise, (list, tuple)):
noise_range = [n * t_initial for n in noise]
if len(noise_range) == 1:
noise_range = noise_range[0]
else:
noise_range = noise * t_initial
else:
noise_range = None
noise_args = dict(
noise_range_t=noise_range,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
)
# setup cycle args for supporting schedulers
cycle_args = dict(
cycle_mul=cycle_mul,
cycle_decay=cycle_decay,
cycle_limit=cycle_limit,
)
lr_scheduler = None
if sched == 'cosine':
lr_scheduler = CosineLRScheduler(
optimizer,
t_initial=t_initial,
lr_min=min_lr,
t_in_epochs=step_on_epochs,
**cycle_args,
**warmup_args,
**noise_args,
k_decay=k_decay,
)
elif sched == 'tanh':
lr_scheduler = TanhLRScheduler(
optimizer,
t_initial=t_initial,
lr_min=min_lr,
t_in_epochs=step_on_epochs,
**cycle_args,
**warmup_args,
**noise_args,
)
elif sched == 'step':
lr_scheduler = StepLRScheduler(
optimizer,
decay_t=decay_t,
decay_rate=decay_rate,
t_in_epochs=step_on_epochs,
**warmup_args,
**noise_args,
)
elif sched == 'multistep':
lr_scheduler = MultiStepLRScheduler(
optimizer,
decay_t=decay_milestones,
decay_rate=decay_rate,
t_in_epochs=step_on_epochs,
**warmup_args,
**noise_args,
)
elif sched == 'plateau':
assert step_on_epochs, 'Plateau LR only supports step per epoch.'
warmup_args.pop('warmup_prefix', False)
lr_scheduler = PlateauLRScheduler(
optimizer,
decay_rate=decay_rate,
patience_t=patience_epochs,
cooldown_t=0,
**warmup_args,
lr_min=min_lr,
mode=plateau_mode,
**noise_args,
)
elif sched == 'poly':
lr_scheduler = PolyLRScheduler(
optimizer,
power=decay_rate, # overloading 'decay_rate' as polynomial power
t_initial=t_initial,
lr_min=min_lr,
t_in_epochs=step_on_epochs,
k_decay=k_decay,
**cycle_args,
**warmup_args,
**noise_args,
)
if hasattr(lr_scheduler, 'get_cycle_length'):
# For cycle based schedulers (cosine, tanh, poly) recalculate total epochs w/ cycles & cooldown
# NOTE: Warmup prefix added in get_cycle_lengths() if enabled
t_with_cycles_and_cooldown = lr_scheduler.get_cycle_length() + cooldown_t
if step_on_epochs:
num_epochs = t_with_cycles_and_cooldown
else:
num_epochs = t_with_cycles_and_cooldown // updates_per_epoch
else:
if warmup_prefix:
num_epochs += warmup_epochs
return lr_scheduler, num_epochs
| pytorch-image-models/timm/scheduler/scheduler_factory.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/scheduler_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 3536
} |
""" Exponential Moving Average (EMA) of model updates
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from collections import OrderedDict
from copy import deepcopy
from typing import Optional
import torch
import torch.nn as nn
_logger = logging.getLogger(__name__)
class ModelEma:
""" Model Exponential Moving Average (DEPRECATED)
Keep a moving average of everything in the model state_dict (parameters and buffers).
This version is deprecated, it does not work with scripted models. Will be removed eventually.
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device='', resume=''):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
_logger.info("Loaded state_dict_ema")
else:
_logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
def update(self, model):
# correct a mismatch in state dict keys
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = model.state_dict()
for k, ema_v in self.ema.state_dict().items():
if needs_module:
k = 'module.' + k
model_v = msd[k].detach()
if self.device:
model_v = model_v.to(device=self.device)
ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
class ModelEmaV2(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device=None):
super().__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def _update(self, model, update_fn):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def update(self, model):
self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
def set(self, model):
self._update(model, update_fn=lambda e, m: m)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
class ModelEmaV3(nn.Module):
""" Model Exponential Moving Average V3
Keep a moving average of everything in the model state_dict (parameters and buffers).
V3 of this module leverages for_each and in-place operations for faster performance.
Decay warmup based on code by @crowsonkb, her comments:
If inv_gamma=1 and power=1, implements a simple average. inv_gamma=1, power=2/3 are
good values for models you plan to train for a million or more steps (reaches decay
factor 0.999 at 31.6K steps, 0.9999 at 1M steps), inv_gamma=1, power=3/4 for models
you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at
215.4k steps).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(
self,
model,
decay: float = 0.9999,
min_decay: float = 0.0,
update_after_step: int = 0,
use_warmup: bool = False,
warmup_gamma: float = 1.0,
warmup_power: float = 2/3,
device: Optional[torch.device] = None,
foreach: bool = True,
exclude_buffers: bool = False,
):
super().__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.min_decay = min_decay
self.update_after_step = update_after_step
self.use_warmup = use_warmup
self.warmup_gamma = warmup_gamma
self.warmup_power = warmup_power
self.foreach = foreach
self.device = device # perform ema on different device from model if set
self.exclude_buffers = exclude_buffers
if self.device is not None and device != next(model.parameters()).device:
self.foreach = False # cannot use foreach methods with different devices
self.module.to(device=device)
def get_decay(self, step: Optional[int] = None) -> float:
"""
Compute the decay factor for the exponential moving average.
"""
if step is None:
return self.decay
step = max(0, step - self.update_after_step - 1)
if step <= 0:
return 0.0
if self.use_warmup:
decay = 1 - (1 + step / self.warmup_gamma) ** -self.warmup_power
decay = max(min(decay, self.decay), self.min_decay)
else:
decay = self.decay
return decay
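    # Illustration of the warmup decay described in the class docstring (approximate,
    # for intuition only): with use_warmup=True, warmup_gamma=1.0 and warmup_power=2/3,
    # decay = 1 - (1 + step) ** -(2/3), which reaches ~0.999 near step 31.6k and
    # ~0.9999 near step 1M, before being clamped into [min_decay, self.decay].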
@torch.no_grad()
def update(self, model, step: Optional[int] = None):
decay = self.get_decay(step)
if self.exclude_buffers:
self.apply_update_no_buffers_(model, decay)
else:
self.apply_update_(model, decay)
def apply_update_(self, model, decay: float):
# interpolate parameters and buffers
if self.foreach:
ema_lerp_values = []
model_lerp_values = []
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if ema_v.is_floating_point():
ema_lerp_values.append(ema_v)
model_lerp_values.append(model_v)
else:
ema_v.copy_(model_v)
if hasattr(torch, '_foreach_lerp_'):
torch._foreach_lerp_(ema_lerp_values, model_lerp_values, weight=1. - decay)
else:
torch._foreach_mul_(ema_lerp_values, scalar=decay)
torch._foreach_add_(ema_lerp_values, model_lerp_values, alpha=1. - decay)
else:
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if ema_v.is_floating_point():
ema_v.lerp_(model_v.to(device=self.device), weight=1. - decay)
else:
ema_v.copy_(model_v.to(device=self.device))
def apply_update_no_buffers_(self, model, decay: float):
# interpolate parameters, copy buffers
ema_params = tuple(self.module.parameters())
model_params = tuple(model.parameters())
if self.foreach:
if hasattr(torch, '_foreach_lerp_'):
torch._foreach_lerp_(ema_params, model_params, weight=1. - decay)
else:
torch._foreach_mul_(ema_params, scalar=decay)
torch._foreach_add_(ema_params, model_params, alpha=1 - decay)
else:
for ema_p, model_p in zip(ema_params, model_params):
ema_p.lerp_(model_p.to(device=self.device), weight=1. - decay)
for ema_b, model_b in zip(self.module.buffers(), model.buffers()):
ema_b.copy_(model_b.to(device=self.device))
@torch.no_grad()
def set(self, model):
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
ema_v.copy_(model_v.to(device=self.device))
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs) | pytorch-image-models/timm/utils/model_ema.py/0 | {
"file_path": "pytorch-image-models/timm/utils/model_ema.py",
"repo_id": "pytorch-image-models",
"token_count": 4614
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Building good agents
[[open-in-colab]]
There's a world of difference between building an agent that works and one that doesn't.
How can we build agents that fall into the first category?
In this guide, we're going to look at best practices for building agents.
> [!TIP]
> If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour).
### The best agentic systems are the simplest: simplify the workflow as much as you can
Giving an LLM some agency in your workflow introduces some risk of errors.
Well-programmed agentic systems have good error logging and retry mechanisms anyway, so the LLM engine has a chance to self-correct its mistakes. But to reduce the risk of LLM error as much as possible, you should simplify your workflow!
Let's revisit the example from the [intro to agents](../conceptual_guides/intro_agents): a bot that answers user queries for a surf trip company.
Instead of letting the agent make 2 different calls to a "travel distance API" and a "weather API" each time it is asked about a new surf spot, you could just make one unified tool, "return_spot_information": a function that calls both APIs at once and returns their combined outputs to the user.
This will reduce costs, latency, and error risk!
The main guideline is: reduce the number of LLM calls as much as you can.
This leads to a few takeaways:
- Whenever possible, group two tools into one, like in our example of the two APIs (see the sketch just below).
- Whenever possible, logic should be based on deterministic functions rather than agentic decisions.
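As a sketch of the first point, here is what such a merged tool could look like. The two helpers `get_travel_distance` and `get_weather` are hypothetical stand-ins for the two API calls, not part of smolagents:
```py
from smolagents import tool
def get_travel_distance(spot: str) -> float:
    # Hypothetical call to the travel distance API
    return 120.5
def get_weather(spot: str) -> str:
    # Hypothetical call to the weather API
    return "sunny, 1m waves"
@tool
def return_spot_information(spot: str) -> str:
    """
    Returns the travel distance and current weather for a surf spot in one combined report.
    Args:
        spot: the name of the surf spot, like "Anchor Point, Taghazout, Morocco".
    """
    distance = get_travel_distance(spot)
    weather = get_weather(spot)
    return f"Spot {spot}: {distance} km away, conditions: {weather}"
```
A single LLM call can now retrieve everything the user needs, instead of chaining two tool calls and hoping both succeed.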
### Improve the information flow to the LLM engine
Remember that your LLM engine is like an *intelligent* robot locked in a room, whose only communication with the outside world is notes passed under the door.
It won't know about anything that you don't explicitly put into its prompt.
So first start by making your task very clear!
Since an agent is powered by an LLM, even minor variations in how you formulate the task can yield completely different results.
Then, improve the flow of information towards your agent through tool use.
Particular guidelines to follow:
- Each tool should log (by simply using `print` statements inside the tool's `forward` method) everything that could be useful for the LLM engine.
- In particular, detailed logging on tool execution errors helps a lot!
For instance, here's a tool that retrieves weather data based on location and date-time:
First, here's a poor version:
```python
import datetime
from smolagents import tool
def get_weather_report_at_coordinates(coordinates, date_time):
# Dummy function, returns a list of [temperature in °C, risk of rain on a scale 0-1, wave height in m]
return [28.0, 0.35, 0.85]
def convert_location_to_coordinates(location):
# Returns dummy coordinates
return [3.3, -42.0]
@tool
def get_weather_api(location: str, date_time: str) -> str:
"""
Returns the weather report.
Args:
location: the name of the place that you want the weather for.
date_time: the date and time for which you want the report.
"""
lon, lat = convert_location_to_coordinates(location)
date_time = datetime.strptime(date_time)
return str(get_weather_report_at_coordinates((lon, lat), date_time))
```
Why is it bad?
- there is no mention of the precise format that should be used for `date_time`
- there is no detail on how the location should be specified
- there is no logging mechanism to make failures explicit, such as the location being in the wrong format or `date_time` not being properly formatted
- the output format is hard to understand
If the tool call fails, the error trace logged in memory can help the LLM reverse-engineer the tool and fix its mistakes. But why leave so much of that work to the LLM?
Here is an example of how this tool could be built in a better way:
```python
@tool
def get_weather_api(location: str, date_time: str) -> str:
"""
Returns the weather report.
Args:
location: the name of the place that you want the weather for. Should be a place name, followed by possibly a city name, then a country, like "Anchor Point, Taghazout, Morocco".
date_time: the date and time for which you want the report, formatted as '%m/%d/%y %H:%M:%S'.
"""
lon, lat = convert_location_to_coordinates(location)
try:
        date_time = datetime.strptime(date_time, "%m/%d/%y %H:%M:%S")
except Exception as e:
raise ValueError("Conversion of `date_time` to datetime format failed, make sure to provide a string in format '%m/%d/%y %H:%M:%S'. Full trace:" + str(e))
temperature_celsius, risk_of_rain, wave_height = get_weather_report_at_coordinates((lon, lat), date_time)
return f"Weather report for {location}, {date_time}: Temperature will be {temperature_celsius}°C, risk of rain is {risk_of_rain*100:.0f}%, wave height is {wave_height}m."
```
In general, to ease the load on your LLM, a good question to ask yourself is: "If I were new and inexperienced and using this tool for the first time, how easy would it be to program with it and to correct my own mistakes?"
### Give more arguments to the agent
To pass some additional objects to your agent beyond the simple string describing the task, you can use the `additional_args` argument to pass any type of object:
```py
from smolagents import CodeAgent, HfApiModel
model_id = "meta-llama/Llama-3.3-70B-Instruct"
agent = CodeAgent(tools=[], model=HfApiModel(model_id=model_id), add_base_tools=True)
agent.run(
"Why does Mike not know many people in New York?",
additional_args={"mp3_sound_file_url":'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3'}
)
```
For instance, you can use this `additional_args` argument to pass images or strings that you want your agent to leverage.
## How to debug your agent
### 1. Use a stronger LLM
In an agentic workflow, some errors are actual errors, while others are the fault of your LLM engine not reasoning properly.
For instance, consider this trace, where I asked a `CodeAgent` to make a picture of a car:
```
==================================================================================================== New task ====================================================================================================
Make me a cool car picture
──────────────────────────────────────────────────────────────────────────────────────────────────── New step ────────────────────────────────────────────────────────────────────────────────────────────────────
Agent is executing the code below: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
image_generator(prompt="A cool, futuristic sports car with LED headlights, aerodynamic design, and vibrant color, high-res, photorealistic")
──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Last output from code snippet: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png
Step 1:
- Time taken: 16.35 seconds
- Input tokens: 1,383
- Output tokens: 77
──────────────────────────────────────────────────────────────────────────────────────────────────── New step ────────────────────────────────────────────────────────────────────────────────────────────────────
Agent is executing the code below: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
final_answer("/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png")
──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Print outputs:
Last output from code snippet: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png
Final answer:
/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png
```
The user sees a path being returned to them, instead of an image.
It could look like a bug in the system, but actually the agentic system didn't cause the error: it's just that the LLM brain made the mistake of not saving the image output into a variable.
Thus it cannot access the image again except via the path that was logged while saving the image, so it returns the path instead of the image.
The first step to debugging your agent is thus "use a more powerful LLM". Alternatives like `Qwen2.5-72B-Instruct` wouldn't have made that mistake.
### 2. Provide more guidance / more information
You can also use less powerful models, provided you guide them more effectively.
Put yourself in your model's shoes: if you were the model solving the task, would you struggle with the information available to you (from the system prompt + task formulation + tool descriptions)?
Would you need some added clarifications?
To provide extra information, we do not recommend changing the system prompt right away: the default system prompt has many adjustments that you do not want to mess up unless you understand the prompt very well.
Better ways to guide your LLM engine are:
- If it's about the task to solve: add all these details to the task. The task could be 100 pages long.
- If it's about how to use tools: the description attribute of your tools.
### 3. Change the system prompt (generally not advised)
If the above clarifications are not sufficient, you can change the system prompt.
Let's see how it works. For example, let us check the default system prompt for the [`CodeAgent`] (the version below is shortened by skipping the zero-shot examples).
```python
print(agent.prompt_templates["system_prompt"])
```
Here is what you get:
```text
You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can.
To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_code>' sequence.
During each intermediate step, you can use 'print()' to save whatever important information you will then need.
These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step.
In the end you have to return a final answer using the `final_answer` tool.
Here are a few examples using notional tools:
---
{examples}
Above example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools:
{{tool_descriptions}}
{{managed_agents_descriptions}}
Here are the rules you should always follow to solve your task:
1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail.
2. Use only variables that you have defined!
3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'.
4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block.
5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters.
6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'.
7. Never create any notional variables in our code, as having these in your logs might derail you from the true variables.
8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}}
9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist.
10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
```
As you can see, there are placeholders like `"{{tool_descriptions}}"`: these will be used upon agent initialization to insert automatically generated descriptions of the tools or managed agents.
So while you can overwrite this system prompt template by passing your custom prompt as an argument to the `system_prompt` parameter, your new system prompt must contain the following placeholders:
- `"{{tool_descriptions}}"` to insert tool descriptions.
- `"{{managed_agents_description}}"` to insert the description for managed agents, if there are any.
- For `CodeAgent` only: `"{{authorized_imports}}"` to insert the list of authorized imports.
Then you can change the system prompt as follows:
```py
from smolagents.prompts import CODE_SYSTEM_PROMPT
modified_system_prompt = CODE_SYSTEM_PROMPT + "\nHere you go!" # Change the system prompt here
agent = CodeAgent(
tools=[],
model=HfApiModel(),
system_prompt=modified_system_prompt
)
```
This also works with the [`ToolCallingAgent`].
### 4. Extra planning
We provide a model for a supplementary planning step, which an agent can run regularly in-between normal action steps. In this step, there is no tool call; the LLM is simply asked to update a list of facts it knows and to reflect, based on those facts, on what steps it should take next.
```py
from smolagents import load_tool, CodeAgent, HfApiModel, DuckDuckGoSearchTool
from dotenv import load_dotenv
load_dotenv()
# Import tool from Hub
image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True)
search_tool = DuckDuckGoSearchTool()
agent = CodeAgent(
tools=[search_tool],
model=HfApiModel("Qwen/Qwen2.5-72B-Instruct"),
planning_interval=3 # This is where you activate planning!
)
# Run it!
result = agent.run(
"How long would a cheetah at full speed take to run the length of Pont Alexandre III?",
)
```
| smolagents/docs/source/hi/tutorials/building_good_agents.md/0 | {
"file_path": "smolagents/docs/source/hi/tutorials/building_good_agents.md",
"repo_id": "smolagents",
"token_count": 12733
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Secure code execution
[[open-in-colab]]
> [!TIP]
> If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour).
### Code agents
[Multiple](https://huggingface.co/papers/2402.01030) [research](https://huggingface.co/papers/2411.01747) [papers](https://huggingface.co/papers/2401.00812) have shown that having the LLM write its actions (the tool calls) in code works much better than the current industry standard of tool calling, which consists of various flavors of "writing actions as a JSON of tool names and arguments".
Why is code better? Because we designed programming languages specifically to express the actions performed by a computer. If JSON snippets were a better way, this package would have been written in JSON snippets and the devil would be laughing at us.
Code is simply a better way to express actions on a computer. It has better:
- **Composability:** could you nest JSON actions within each other, or define a set of JSON actions to re-use later, the same way you could just define a Python function?
- **Object management:** how do you store the output of an action like `generate_image` in JSON?
- **Generality:** code is built to simply express anything you can have a computer do.
- **Representation in LLM training corpora:** why not leverage the wealth of high-quality actions already included in LLM training corpora? It's a blessing.
This is illustrated in the figure below, taken from [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030).
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/code_vs_json_actions.png">
This is why we put the emphasis on proposing code agents, in this case Python agents, which in turn means putting more effort into building secure Python interpreters.
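To make the contrast concrete, here is an illustrative sketch (the tool names are placeholders for this example, except `final_answer`, which a code agent uses to return its result): with JSON tool calls, each action like `{"tool": "search_surf_spots", "arguments": {"query": "..."}}` needs a separate round-trip before its output can be used, whereas a code action composes tools and keeps intermediate objects in variables:
```py
# A single code action: call a tool, reuse its output directly, and hand over the result.
results = search_surf_spots(query="surf spots near Taghazout")  # placeholder tool
best_spot = results[0]
image = generate_image(prompt=f"A postcard of {best_spot}")  # placeholder tool
final_answer(image)
```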
### Local Python interpreter
By default, the `CodeAgent` runs LLM-generated code in your environment.
This execution is not done by the vanilla Python interpreter: we re-built a more secure `LocalPythonInterpreter` from the ground up.
This interpreter is designed for security by:
- Restricting imports to a list explicitly passed by the user
- Capping the number of operations to prevent infinite loops and resource bloat
- Not performing any operation that isn't pre-defined
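In practice, you surface the allowed imports when building the agent. Here is a minimal sketch (the `numpy` entry is just an example of a package you might choose to authorize):
```py
from smolagents import CodeAgent, HfApiModel
agent = CodeAgent(
    tools=[],
    model=HfApiModel(),
    # Only these imports (plus the base builtin modules) are allowed in
    # LLM-generated code; any other import raises an interpreter error.
    additional_authorized_imports=["numpy"],
)
agent.run("Compute the mean of the first 100 integers.")
```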
We've used this interpreter on many use cases and have never observed any damage to the environment.
However, this solution is not watertight: you could imagine that an LLM fine-tuned for malicious actions could still hurt your environment. For instance, if you allow an innocuous package like `Pillow` to process images, the LLM could generate thousands of image saves to bloat your hard drive.
This is of course unlikely if you chose the LLM engine yourself, but it could happen.
So if you want to be extra cautious, you can use the remote code execution option described below.
### E2B code executor
For maximum security, you can use our integration with E2B to run code in a sandboxed environment. This is a remote execution service that runs your code in an isolated container, making it impossible for the code to affect your local environment.
For this, you will need to set up your E2B account and set your `E2B_API_KEY` in your environment variables. Head to [E2B's quickstart documentation](https://e2b.dev/docs/quickstart) for more information.
Then you can install it with `pip install e2b-code-interpreter python-dotenv`.
Now you're set!
To set the code executor to E2B, simply pass the flag `use_e2b_executor=True` when initializing your `CodeAgent`.
Note that you should add all of your tools' dependencies to `additional_authorized_imports`, so that the executor installs them.
```py
from smolagents import CodeAgent, VisitWebpageTool, HfApiModel
agent = CodeAgent(
tools = [VisitWebpageTool()],
model=HfApiModel(),
additional_authorized_imports=["requests", "markdownify"],
use_e2b_executor=True
)
agent.run("What was Abraham Lincoln's preferred pet?")
```
E2B code execution is not compatible with multi-agent setups for now - because putting agent calls inside a code blob that should be executed remotely gets messy. But we are working on it!
| smolagents/docs/source/zh/tutorials/secure_code_execution.md/0 | {
"file_path": "smolagents/docs/source/zh/tutorials/secure_code_execution.md",
"repo_id": "smolagents",
"token_count": 2729
} |
# This is copied from Magentic-one's great repo: https://github.com/microsoft/autogen/blob/v0.4.4/python/packages/autogen-magentic-one/src/autogen_magentic_one/markdown_browser/mdconvert.py
# Thanks to Microsoft researchers for open-sourcing this!
# type: ignore
import base64
import copy
import html
import json
import mimetypes
import os
import re
import shutil
import subprocess
import sys
import tempfile
import traceback
import zipfile
from typing import Any, Dict, List, Optional, Union
from urllib.parse import parse_qs, quote, unquote, urlparse, urlunparse
import mammoth
import markdownify
import pandas as pd
import pdfminer
import pdfminer.high_level
import pptx
# File-format detection
import puremagic
import pydub
import requests
import speech_recognition as sr
from bs4 import BeautifulSoup
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import SRTFormatter
class _CustomMarkdownify(markdownify.MarkdownConverter):
"""
A custom version of markdownify's MarkdownConverter. Changes include:
- Altering the default heading style to use '#', '##', etc.
- Removing javascript hyperlinks.
- Truncating images with large data:uri sources.
- Ensuring URIs are properly escaped, and do not conflict with Markdown syntax
"""
def __init__(self, **options: Any):
options["heading_style"] = options.get("heading_style", markdownify.ATX)
# Explicitly cast options to the expected type if necessary
super().__init__(**options)
def convert_hn(self, n: int, el: Any, text: str, convert_as_inline: bool) -> str:
"""Same as usual, but be sure to start with a new line"""
if not convert_as_inline:
if not re.search(r"^\n", text):
return "\n" + super().convert_hn(n, el, text, convert_as_inline) # type: ignore
return super().convert_hn(n, el, text, convert_as_inline) # type: ignore
def convert_a(self, el: Any, text: str, convert_as_inline: bool):
"""Same as usual converter, but removes Javascript links and escapes URIs."""
prefix, suffix, text = markdownify.chomp(text) # type: ignore
if not text:
return ""
href = el.get("href")
title = el.get("title")
# Escape URIs and skip non-http or file schemes
if href:
try:
parsed_url = urlparse(href) # type: ignore
if parsed_url.scheme and parsed_url.scheme.lower() not in ["http", "https", "file"]: # type: ignore
return "%s%s%s" % (prefix, text, suffix)
href = urlunparse(parsed_url._replace(path=quote(unquote(parsed_url.path)))) # type: ignore
except ValueError: # It's not clear if this ever gets thrown
return "%s%s%s" % (prefix, text, suffix)
# For the replacement see #29: text nodes underscores are escaped
if (
self.options["autolinks"]
and text.replace(r"\_", "_") == href
and not title
and not self.options["default_title"]
):
# Shortcut syntax
return "<%s>" % href
if self.options["default_title"] and not title:
title = href
title_part = ' "%s"' % title.replace('"', r"\"") if title else ""
return "%s[%s](%s%s)%s" % (prefix, text, href, title_part, suffix) if href else text
def convert_img(self, el: Any, text: str, convert_as_inline: bool) -> str:
"""Same as usual converter, but removes data URIs"""
alt = el.attrs.get("alt", None) or ""
src = el.attrs.get("src", None) or ""
title = el.attrs.get("title", None) or ""
title_part = ' "%s"' % title.replace('"', r"\"") if title else ""
if convert_as_inline and el.parent.name not in self.options["keep_inline_images_in"]:
return alt
# Remove dataURIs
if src.startswith("data:"):
src = src.split(",")[0] + "..."
return "" % (alt, src, title_part)
def convert_soup(self, soup: Any) -> str:
return super().convert_soup(soup) # type: ignore
class DocumentConverterResult:
"""The result of converting a document to text."""
def __init__(self, title: Union[str, None] = None, text_content: str = ""):
self.title: Union[str, None] = title
self.text_content: str = text_content
class DocumentConverter:
"""Abstract superclass of all DocumentConverters."""
def convert(self, local_path: str, **kwargs: Any) -> Union[None, DocumentConverterResult]:
raise NotImplementedError()
class PlainTextConverter(DocumentConverter):
"""Anything with content type text/plain"""
def convert(self, local_path: str, **kwargs: Any) -> Union[None, DocumentConverterResult]:
# Guess the content type from any file extension that might be around
content_type, _ = mimetypes.guess_type("__placeholder" + kwargs.get("file_extension", ""))
# Only accept text files
if content_type is None:
return None
# elif "text/" not in content_type.lower():
# return None
text_content = ""
with open(local_path, "rt", encoding="utf-8") as fh:
text_content = fh.read()
return DocumentConverterResult(
title=None,
text_content=text_content,
)
class HtmlConverter(DocumentConverter):
"""Anything with content type text/html"""
def convert(self, local_path: str, **kwargs: Any) -> Union[None, DocumentConverterResult]:
# Bail if not html
extension = kwargs.get("file_extension", "")
if extension.lower() not in [".html", ".htm"]:
return None
result = None
with open(local_path, "rt", encoding="utf-8") as fh:
result = self._convert(fh.read())
return result
def _convert(self, html_content: str) -> Union[None, DocumentConverterResult]:
"""Helper function that converts and HTML string."""
# Parse the string
soup = BeautifulSoup(html_content, "html.parser")
# Remove javascript and style blocks
for script in soup(["script", "style"]):
script.extract()
# Print only the main content
body_elm = soup.find("body")
webpage_text = ""
if body_elm:
webpage_text = _CustomMarkdownify().convert_soup(body_elm)
else:
webpage_text = _CustomMarkdownify().convert_soup(soup)
assert isinstance(webpage_text, str)
return DocumentConverterResult(
title=None if soup.title is None else soup.title.string, text_content=webpage_text
)
class WikipediaConverter(DocumentConverter):
"""Handle Wikipedia pages separately, focusing only on the main document content."""
def convert(self, local_path: str, **kwargs: Any) -> Union[None, DocumentConverterResult]:
# Bail if not Wikipedia
extension = kwargs.get("file_extension", "")
if extension.lower() not in [".html", ".htm"]:
return None
url = kwargs.get("url", "")
if not re.search(r"^https?:\/\/[a-zA-Z]{2,3}\.wikipedia.org\/", url):
return None
# Parse the file
soup = None
with open(local_path, "rt", encoding="utf-8") as fh:
soup = BeautifulSoup(fh.read(), "html.parser")
# Remove javascript and style blocks
for script in soup(["script", "style"]):
script.extract()
# Print only the main content
body_elm = soup.find("div", {"id": "mw-content-text"})
title_elm = soup.find("span", {"class": "mw-page-title-main"})
webpage_text = ""
main_title = None if soup.title is None else soup.title.string
if body_elm:
# What's the title
if title_elm and len(title_elm) > 0:
main_title = title_elm.string # type: ignore
assert isinstance(main_title, str)
# Convert the page
webpage_text = f"# {main_title}\n\n" + _CustomMarkdownify().convert_soup(body_elm)
else:
webpage_text = _CustomMarkdownify().convert_soup(soup)
return DocumentConverterResult(
title=main_title,
text_content=webpage_text,
)
class YouTubeConverter(DocumentConverter):
"""Handle YouTube specially, focusing on the video title, description, and transcript."""
def convert(self, local_path: str, **kwargs: Any) -> Union[None, DocumentConverterResult]:
# Bail if not YouTube
extension = kwargs.get("file_extension", "")
if extension.lower() not in [".html", ".htm"]:
return None
url = kwargs.get("url", "")
if not url.startswith("https://www.youtube.com/watch?"):
return None
# Parse the file
soup = None
with open(local_path, "rt", encoding="utf-8") as fh:
soup = BeautifulSoup(fh.read(), "html.parser")
# Read the meta tags
assert soup.title is not None and soup.title.string is not None
metadata: Dict[str, str] = {"title": soup.title.string}
for meta in soup(["meta"]):
for a in meta.attrs:
if a in ["itemprop", "property", "name"]:
metadata[meta[a]] = meta.get("content", "")
break
# We can also try to read the full description. This is more prone to breaking, since it reaches into the page implementation
try:
for script in soup(["script"]):
content = script.text
if "ytInitialData" in content:
lines = re.split(r"\r?\n", content)
obj_start = lines[0].find("{")
obj_end = lines[0].rfind("}")
if obj_start >= 0 and obj_end >= 0:
data = json.loads(lines[0][obj_start : obj_end + 1])
attrdesc = self._findKey(data, "attributedDescriptionBodyText") # type: ignore
if attrdesc:
metadata["description"] = str(attrdesc["content"])
break
except Exception:
pass
# Start preparing the page
webpage_text = "# YouTube\n"
title = self._get(metadata, ["title", "og:title", "name"]) # type: ignore
assert isinstance(title, str)
if title:
webpage_text += f"\n## {title}\n"
stats = ""
views = self._get(metadata, ["interactionCount"]) # type: ignore
if views:
stats += f"- **Views:** {views}\n"
keywords = self._get(metadata, ["keywords"]) # type: ignore
if keywords:
stats += f"- **Keywords:** {keywords}\n"
runtime = self._get(metadata, ["duration"]) # type: ignore
if runtime:
stats += f"- **Runtime:** {runtime}\n"
if len(stats) > 0:
webpage_text += f"\n### Video Metadata\n{stats}\n"
description = self._get(metadata, ["description", "og:description"]) # type: ignore
if description:
webpage_text += f"\n### Description\n{description}\n"
transcript_text = ""
parsed_url = urlparse(url) # type: ignore
params = parse_qs(parsed_url.query) # type: ignore
if "v" in params:
assert isinstance(params["v"][0], str)
video_id = str(params["v"][0])
try:
# Must be a single transcript.
transcript = YouTubeTranscriptApi.get_transcript(video_id) # type: ignore
# transcript_text = " ".join([part["text"] for part in transcript]) # type: ignore
# Alternative formatting:
transcript_text = SRTFormatter().format_transcript(transcript)
except Exception:
pass
if transcript_text:
webpage_text += f"\n### Transcript\n{transcript_text}\n"
title = title if title else soup.title.string
assert isinstance(title, str)
return DocumentConverterResult(
title=title,
text_content=webpage_text,
)
def _get(self, metadata: Dict[str, str], keys: List[str], default: Union[str, None] = None) -> Union[str, None]:
for k in keys:
if k in metadata:
return metadata[k]
return default
def _findKey(self, json: Any, key: str) -> Union[str, None]: # TODO: Fix json type
if isinstance(json, list):
for elm in json:
ret = self._findKey(elm, key)
if ret is not None:
return ret
elif isinstance(json, dict):
for k in json:
if k == key:
return json[k]
else:
ret = self._findKey(json[k], key)
if ret is not None:
return ret
return None
class PdfConverter(DocumentConverter):
"""
Converts PDFs to Markdown. Most style information is ignored, so the results are essentially plain-text.
"""
def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
# Bail if not a PDF
extension = kwargs.get("file_extension", "")
if extension.lower() != ".pdf":
return None
return DocumentConverterResult(
title=None,
text_content=pdfminer.high_level.extract_text(local_path),
)
class DocxConverter(HtmlConverter):
"""
    Converts DOCX files to Markdown. Style information (e.g., headings) and tables are preserved where possible.
"""
def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
# Bail if not a DOCX
extension = kwargs.get("file_extension", "")
if extension.lower() != ".docx":
return None
result = None
with open(local_path, "rb") as docx_file:
result = mammoth.convert_to_html(docx_file)
html_content = result.value
result = self._convert(html_content)
return result
class XlsxConverter(HtmlConverter):
"""
Converts XLSX files to Markdown, with each sheet presented as a separate Markdown table.
"""
def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
# Bail if not a XLSX
extension = kwargs.get("file_extension", "")
if extension.lower() not in [".xlsx", ".xls"]:
return None
sheets = pd.read_excel(local_path, sheet_name=None)
md_content = ""
for s in sheets:
md_content += f"## {s}\n"
html_content = sheets[s].to_html(index=False)
md_content += self._convert(html_content).text_content.strip() + "\n\n"
return DocumentConverterResult(
title=None,
text_content=md_content.strip(),
)
class PptxConverter(HtmlConverter):
"""
Converts PPTX files to Markdown. Supports heading, tables and images with alt text.
"""
def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
# Bail if not a PPTX
extension = kwargs.get("file_extension", "")
if extension.lower() != ".pptx":
return None
md_content = ""
presentation = pptx.Presentation(local_path)
slide_num = 0
for slide in presentation.slides:
slide_num += 1
md_content += f"\n\n<!-- Slide number: {slide_num} -->\n"
title = slide.shapes.title
for shape in slide.shapes:
# Pictures
if self._is_picture(shape):
# https://github.com/scanny/python-pptx/pull/512#issuecomment-1713100069
alt_text = ""
try:
alt_text = shape._element._nvXxPr.cNvPr.attrib.get("descr", "")
except Exception:
pass
# A placeholder name
filename = re.sub(r"\W", "", shape.name) + ".jpg"
md_content += "\n\n"
# Tables
if self._is_table(shape):
html_table = "<html><body><table>"
first_row = True
for row in shape.table.rows:
html_table += "<tr>"
for cell in row.cells:
if first_row:
html_table += "<th>" + html.escape(cell.text) + "</th>"
else:
html_table += "<td>" + html.escape(cell.text) + "</td>"
html_table += "</tr>"
first_row = False
html_table += "</table></body></html>"
md_content += "\n" + self._convert(html_table).text_content.strip() + "\n"
# Text areas
elif shape.has_text_frame:
if shape == title:
md_content += "# " + shape.text.lstrip() + "\n"
else:
md_content += shape.text + "\n"
md_content = md_content.strip()
if slide.has_notes_slide:
md_content += "\n\n### Notes:\n"
notes_frame = slide.notes_slide.notes_text_frame
if notes_frame is not None:
md_content += notes_frame.text
md_content = md_content.strip()
return DocumentConverterResult(
title=None,
text_content=md_content.strip(),
)
def _is_picture(self, shape):
if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.PICTURE:
return True
if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.PLACEHOLDER:
if hasattr(shape, "image"):
return True
return False
def _is_table(self, shape):
if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.TABLE:
return True
return False
class MediaConverter(DocumentConverter):
"""
Abstract class for multi-modal media (e.g., images and audio)
"""
def _get_metadata(self, local_path):
exiftool = shutil.which("exiftool")
if not exiftool:
return None
else:
try:
result = subprocess.run([exiftool, "-json", local_path], capture_output=True, text=True).stdout
return json.loads(result)[0]
except Exception:
return None
class WavConverter(MediaConverter):
"""
Converts WAV files to markdown via extraction of metadata (if `exiftool` is installed), and speech transcription (if `speech_recognition` is installed).
"""
def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
        # Bail if not a WAV
extension = kwargs.get("file_extension", "")
if extension.lower() != ".wav":
return None
md_content = ""
# Add metadata
metadata = self._get_metadata(local_path)
if metadata:
for f in [
"Title",
"Artist",
"Author",
"Band",
"Album",
"Genre",
"Track",
"DateTimeOriginal",
"CreateDate",
"Duration",
]:
if f in metadata:
md_content += f"{f}: {metadata[f]}\n"
# Transcribe
try:
transcript = self._transcribe_audio(local_path)
md_content += "\n\n### Audio Transcript:\n" + ("[No speech detected]" if transcript == "" else transcript)
except Exception:
md_content += "\n\n### Audio Transcript:\nError. Could not transcribe this audio."
return DocumentConverterResult(
title=None,
text_content=md_content.strip(),
)
def _transcribe_audio(self, local_path) -> str:
recognizer = sr.Recognizer()
with sr.AudioFile(local_path) as source:
audio = recognizer.record(source)
return recognizer.recognize_google(audio).strip()
class Mp3Converter(WavConverter):
"""
Converts MP3 files to markdown via extraction of metadata (if `exiftool` is installed), and speech transcription (if `speech_recognition` AND `pydub` are installed).
"""
def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
        # Bail if not an MP3
extension = kwargs.get("file_extension", "")
if extension.lower() != ".mp3":
return None
md_content = ""
# Add metadata
metadata = self._get_metadata(local_path)
if metadata:
for f in [
"Title",
"Artist",
"Author",
"Band",
"Album",
"Genre",
"Track",
"DateTimeOriginal",
"CreateDate",
"Duration",
]:
if f in metadata:
md_content += f"{f}: {metadata[f]}\n"
# Transcribe
handle, temp_path = tempfile.mkstemp(suffix=".wav")
os.close(handle)
try:
sound = pydub.AudioSegment.from_mp3(local_path)
sound.export(temp_path, format="wav")
_args = dict()
_args.update(kwargs)
_args["file_extension"] = ".wav"
try:
transcript = super()._transcribe_audio(temp_path).strip()
md_content += "\n\n### Audio Transcript:\n" + (
"[No speech detected]" if transcript == "" else transcript
)
except Exception:
md_content += "\n\n### Audio Transcript:\nError. Could not transcribe this audio."
finally:
os.unlink(temp_path)
# Return the result
return DocumentConverterResult(
title=None,
text_content=md_content.strip(),
)
class ZipConverter(DocumentConverter):
"""
Extracts ZIP files to a permanent local directory and returns a listing of extracted files.
"""
def __init__(self, extract_dir: str = "downloads"):
"""
Initialize with path to extraction directory.
Args:
extract_dir: The directory where files will be extracted. Defaults to "downloads"
"""
self.extract_dir = extract_dir
# Create the extraction directory if it doesn't exist
os.makedirs(self.extract_dir, exist_ok=True)
def convert(self, local_path: str, **kwargs: Any) -> Union[None, DocumentConverterResult]:
# Bail if not a ZIP file
extension = kwargs.get("file_extension", "")
if extension.lower() != ".zip":
return None
# Verify it's actually a ZIP file
if not zipfile.is_zipfile(local_path):
return None
# Extract all files and build list
extracted_files = []
with zipfile.ZipFile(local_path, "r") as zip_ref:
# Extract all files
zip_ref.extractall(self.extract_dir)
# Get list of all files
for file_path in zip_ref.namelist():
# Skip directories
if not file_path.endswith("/"):
extracted_files.append(self.extract_dir + "/" + file_path)
# Sort files for consistent output
extracted_files.sort()
# Build the markdown content
md_content = "Downloaded the following files:\n"
for file in extracted_files:
md_content += f"* {file}\n"
return DocumentConverterResult(title="Extracted Files", text_content=md_content.strip())
class ImageConverter(MediaConverter):
"""
Converts images to markdown via extraction of metadata (if `exiftool` is installed), OCR (if `easyocr` is installed), and description via a multimodal LLM (if an mlm_client is configured).
"""
def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
        # Bail if not a supported image type
extension = kwargs.get("file_extension", "")
if extension.lower() not in [".jpg", ".jpeg", ".png"]:
return None
md_content = ""
# Add metadata
metadata = self._get_metadata(local_path)
if metadata:
for f in [
"ImageSize",
"Title",
"Caption",
"Description",
"Keywords",
"Artist",
"Author",
"DateTimeOriginal",
"CreateDate",
"GPSPosition",
]:
if f in metadata:
md_content += f"{f}: {metadata[f]}\n"
# Try describing the image with GPTV
mlm_client = kwargs.get("mlm_client")
mlm_model = kwargs.get("mlm_model")
if mlm_client is not None and mlm_model is not None:
md_content += (
"\n# Description:\n"
+ self._get_mlm_description(
local_path, extension, mlm_client, mlm_model, prompt=kwargs.get("mlm_prompt")
).strip()
+ "\n"
)
return DocumentConverterResult(
title=None,
text_content=md_content,
)
def _get_mlm_description(self, local_path, extension, client, model, prompt=None):
if prompt is None or prompt.strip() == "":
prompt = "Write a detailed caption for this image."
sys.stderr.write(f"MLM Prompt:\n{prompt}\n")
data_uri = ""
with open(local_path, "rb") as image_file:
content_type, encoding = mimetypes.guess_type("_dummy" + extension)
if content_type is None:
content_type = "image/jpeg"
image_base64 = base64.b64encode(image_file.read()).decode("utf-8")
data_uri = f"data:{content_type};base64,{image_base64}"
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {
"url": data_uri,
},
},
],
}
]
response = client.chat.completions.create(model=model, messages=messages)
return response.choices[0].message.content
class FileConversionException(Exception):
pass
class UnsupportedFormatException(Exception):
pass
class MarkdownConverter:
"""(In preview) An extremely simple text-based document reader, suitable for LLM use.
This reader will convert common file-types or webpages to Markdown."""
def __init__(
self,
requests_session: Optional[requests.Session] = None,
mlm_client: Optional[Any] = None,
mlm_model: Optional[Any] = None,
):
if requests_session is None:
self._requests_session = requests.Session()
else:
self._requests_session = requests_session
self._mlm_client = mlm_client
self._mlm_model = mlm_model
self._page_converters: List[DocumentConverter] = []
# Register converters for successful browsing operations
# Later registrations are tried first / take higher priority than earlier registrations
# To this end, the most specific converters should appear below the most generic converters
self.register_page_converter(PlainTextConverter())
self.register_page_converter(HtmlConverter())
self.register_page_converter(WikipediaConverter())
self.register_page_converter(YouTubeConverter())
self.register_page_converter(DocxConverter())
self.register_page_converter(XlsxConverter())
self.register_page_converter(PptxConverter())
self.register_page_converter(WavConverter())
self.register_page_converter(Mp3Converter())
self.register_page_converter(ImageConverter())
self.register_page_converter(ZipConverter())
self.register_page_converter(PdfConverter())
def convert(
self, source: Union[str, requests.Response], **kwargs: Any
) -> DocumentConverterResult: # TODO: deal with kwargs
"""
Args:
- source: can be a string representing a path or url, or a requests.response object
- extension: specifies the file extension to use when interpreting the file. If None, infer from source (path, uri, content-type, etc.)
"""
# Local path or url
if isinstance(source, str):
if source.startswith("http://") or source.startswith("https://") or source.startswith("file://"):
return self.convert_url(source, **kwargs)
else:
return self.convert_local(source, **kwargs)
# Request response
elif isinstance(source, requests.Response):
return self.convert_response(source, **kwargs)
def convert_local(self, path: str, **kwargs: Any) -> DocumentConverterResult: # TODO: deal with kwargs
# Prepare a list of extensions to try (in order of priority)
ext = kwargs.get("file_extension")
extensions = [ext] if ext is not None else []
# Get extension alternatives from the path and puremagic
base, ext = os.path.splitext(path)
self._append_ext(extensions, ext)
self._append_ext(extensions, self._guess_ext_magic(path))
# Convert
return self._convert(path, extensions, **kwargs)
# TODO what should stream's type be?
def convert_stream(self, stream: Any, **kwargs: Any) -> DocumentConverterResult: # TODO: deal with kwargs
# Prepare a list of extensions to try (in order of priority)
ext = kwargs.get("file_extension")
extensions = [ext] if ext is not None else []
# Save the file locally to a temporary file. It will be deleted before this method exits
handle, temp_path = tempfile.mkstemp()
fh = os.fdopen(handle, "wb")
result = None
try:
# Write to the temporary file
content = stream.read()
if isinstance(content, str):
fh.write(content.encode("utf-8"))
else:
fh.write(content)
fh.close()
# Use puremagic to check for more extension options
self._append_ext(extensions, self._guess_ext_magic(temp_path))
# Convert
result = self._convert(temp_path, extensions, **kwargs)
# Clean up
finally:
try:
fh.close()
except Exception:
pass
os.unlink(temp_path)
return result
def convert_url(self, url: str, **kwargs: Any) -> DocumentConverterResult: # TODO: fix kwargs type
# Send a HTTP request to the URL
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"
response = self._requests_session.get(url, stream=True, headers={"User-Agent": user_agent})
response.raise_for_status()
return self.convert_response(response, **kwargs)
def convert_response(
self, response: requests.Response, **kwargs: Any
) -> DocumentConverterResult: # TODO fix kwargs type
# Prepare a list of extensions to try (in order of priority)
ext = kwargs.get("file_extension")
extensions = [ext] if ext is not None else []
# Guess from the mimetype
content_type = response.headers.get("content-type", "").split(";")[0]
self._append_ext(extensions, mimetypes.guess_extension(content_type))
# Read the content disposition if there is one
content_disposition = response.headers.get("content-disposition", "")
m = re.search(r"filename=([^;]+)", content_disposition)
if m:
base, ext = os.path.splitext(m.group(1).strip("\"'"))
self._append_ext(extensions, ext)
        # Read the extension from the path
base, ext = os.path.splitext(urlparse(response.url).path)
self._append_ext(extensions, ext)
# Save the file locally to a temporary file. It will be deleted before this method exits
handle, temp_path = tempfile.mkstemp()
fh = os.fdopen(handle, "wb")
result = None
try:
# Download the file
for chunk in response.iter_content(chunk_size=512):
fh.write(chunk)
fh.close()
# Use puremagic to check for more extension options
self._append_ext(extensions, self._guess_ext_magic(temp_path))
# Convert
result = self._convert(temp_path, extensions, url=response.url)
except Exception as e:
print(f"Error in converting: {e}")
# Clean up
finally:
try:
fh.close()
except Exception:
pass
os.unlink(temp_path)
return result
def _convert(self, local_path: str, extensions: List[Union[str, None]], **kwargs) -> DocumentConverterResult:
error_trace = ""
for ext in extensions + [None]: # Try last with no extension
for converter in self._page_converters:
_kwargs = copy.deepcopy(kwargs)
# Overwrite file_extension appropriately
if ext is None:
if "file_extension" in _kwargs:
del _kwargs["file_extension"]
else:
_kwargs.update({"file_extension": ext})
# Copy any additional global options
if "mlm_client" not in _kwargs and self._mlm_client is not None:
_kwargs["mlm_client"] = self._mlm_client
if "mlm_model" not in _kwargs and self._mlm_model is not None:
_kwargs["mlm_model"] = self._mlm_model
                # If we hit an error, log it and keep trying
                res = None
                try:
                    res = converter.convert(local_path, **_kwargs)
except Exception:
error_trace = ("\n\n" + traceback.format_exc()).strip()
if res is not None:
# Normalize the content
res.text_content = "\n".join([line.rstrip() for line in re.split(r"\r?\n", res.text_content)])
res.text_content = re.sub(r"\n{3,}", "\n\n", res.text_content)
# Todo
return res
# If we got this far without success, report any exceptions
if len(error_trace) > 0:
raise FileConversionException(
f"Could not convert '{local_path}' to Markdown. File type was recognized as {extensions}. While converting the file, the following error was encountered:\n\n{error_trace}"
)
# Nothing can handle it!
raise UnsupportedFormatException(
f"Could not convert '{local_path}' to Markdown. The formats {extensions} are not supported."
)
def _append_ext(self, extensions, ext):
"""Append a unique non-None, non-empty extension to a list of extensions."""
if ext is None:
return
ext = ext.strip()
if ext == "":
return
        if ext not in extensions:
            extensions.append(ext)
def _guess_ext_magic(self, path):
"""Use puremagic (a Python implementation of libmagic) to guess a file's extension based on the first few bytes."""
# Use puremagic to guess
try:
guesses = puremagic.magic_file(path)
if len(guesses) > 0:
ext = guesses[0].extension.strip()
if len(ext) > 0:
return ext
except FileNotFoundError:
pass
except IsADirectoryError:
pass
except PermissionError:
pass
return None
def register_page_converter(self, converter: DocumentConverter) -> None:
"""Register a page text converter."""
self._page_converters.insert(0, converter)
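# Example usage (a sketch based on the converter API above; the URL is illustrative):
#
#   converter = MarkdownConverter()
#   result = converter.convert("https://en.wikipedia.org/wiki/Markdown")
#   print(result.title)
#   print(result.text_content[:500])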
| smolagents/examples/open_deep_research/scripts/mdconvert.py/0 | {
"file_path": "smolagents/examples/open_deep_research/scripts/mdconvert.py",
"repo_id": "smolagents",
"token_count": 16882
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from dataclasses import dataclass
from typing import Any, Dict, Optional
from .agent_types import AgentAudio
from .local_python_executor import (
BASE_BUILTIN_MODULES,
BASE_PYTHON_TOOLS,
evaluate_python_code,
)
from .tools import PipelineTool, Tool
@dataclass
class PreTool:
name: str
inputs: Dict[str, str]
output_type: type
task: str
description: str
repo_id: str
class PythonInterpreterTool(Tool):
name = "python_interpreter"
description = "This is a tool that evaluates python code. It can be used to perform calculations."
inputs = {
"code": {
"type": "string",
"description": "The python code to run in interpreter",
}
}
output_type = "string"
def __init__(self, *args, authorized_imports=None, **kwargs):
if authorized_imports is None:
self.authorized_imports = list(set(BASE_BUILTIN_MODULES))
else:
self.authorized_imports = list(set(BASE_BUILTIN_MODULES) | set(authorized_imports))
self.inputs = {
"code": {
"type": "string",
"description": (
"The code snippet to evaluate. All variables used in this snippet must be defined in this same snippet, "
f"else you will get an error. This code can only import the following python libraries: {authorized_imports}."
),
}
}
self.base_python_tools = BASE_PYTHON_TOOLS
self.python_evaluator = evaluate_python_code
super().__init__(*args, **kwargs)
def forward(self, code: str) -> str:
state = {}
output = str(
self.python_evaluator(
code,
state=state,
static_tools=self.base_python_tools,
authorized_imports=self.authorized_imports,
            )[0]  # The second element is the boolean is_final_answer
)
return f"Stdout:\n{str(state['_print_outputs'])}\nOutput: {output}"
class FinalAnswerTool(Tool):
name = "final_answer"
description = "Provides a final answer to the given problem."
inputs = {"answer": {"type": "any", "description": "The final answer to the problem"}}
output_type = "any"
def forward(self, answer: Any) -> Any:
return answer
class UserInputTool(Tool):
name = "user_input"
description = "Asks for user's input on a specific question"
inputs = {"question": {"type": "string", "description": "The question to ask the user"}}
output_type = "string"
def forward(self, question):
user_input = input(f"{question} => Type your answer here:")
return user_input
class DuckDuckGoSearchTool(Tool):
name = "web_search"
description = """Performs a duckduckgo web search based on your query (think a Google search) then returns the top search results."""
inputs = {"query": {"type": "string", "description": "The search query to perform."}}
output_type = "string"
def __init__(self, max_results=10, **kwargs):
super().__init__()
self.max_results = max_results
try:
from duckduckgo_search import DDGS
except ImportError as e:
raise ImportError(
"You must install package `duckduckgo_search` to run this tool: for instance run `pip install duckduckgo-search`."
) from e
self.ddgs = DDGS(**kwargs)
def forward(self, query: str) -> str:
results = self.ddgs.text(query, max_results=self.max_results)
if len(results) == 0:
raise Exception("No results found! Try a less restrictive/shorter query.")
postprocessed_results = [f"[{result['title']}]({result['href']})\n{result['body']}" for result in results]
return "## Search Results\n\n" + "\n\n".join(postprocessed_results)
class GoogleSearchTool(Tool):
name = "web_search"
description = """Performs a google web search for your query then returns a string of the top search results."""
inputs = {
"query": {"type": "string", "description": "The search query to perform."},
"filter_year": {
"type": "integer",
"description": "Optionally restrict results to a certain year",
"nullable": True,
},
}
output_type = "string"
def __init__(self):
        super().__init__()
import os
self.serpapi_key = os.getenv("SERPAPI_API_KEY")
def forward(self, query: str, filter_year: Optional[int] = None) -> str:
import requests
if self.serpapi_key is None:
raise ValueError("Missing SerpAPI key. Make sure you have 'SERPAPI_API_KEY' in your env variables.")
params = {
"engine": "google",
"q": query,
"api_key": self.serpapi_key,
"google_domain": "google.com",
}
if filter_year is not None:
params["tbs"] = f"cdr:1,cd_min:01/01/{filter_year},cd_max:12/31/{filter_year}"
response = requests.get("https://serpapi.com/search.json", params=params)
if response.status_code == 200:
results = response.json()
else:
raise ValueError(response.json())
if "organic_results" not in results.keys():
if filter_year is not None:
raise Exception(
f"No results found for query: '{query}' with filtering on year={filter_year}. Use a less restrictive query or do not filter on year."
)
else:
raise Exception(f"No results found for query: '{query}'. Use a less restrictive query.")
if len(results["organic_results"]) == 0:
year_filter_message = f" with filter year={filter_year}" if filter_year is not None else ""
return f"No results found for '{query}'{year_filter_message}. Try with a more general query, or remove the year filter."
web_snippets = []
if "organic_results" in results:
for idx, page in enumerate(results["organic_results"]):
date_published = ""
if "date" in page:
date_published = "\nDate published: " + page["date"]
source = ""
if "source" in page:
source = "\nSource: " + page["source"]
snippet = ""
if "snippet" in page:
snippet = "\n" + page["snippet"]
redacted_version = f"{idx}. [{page['title']}]({page['link']}){date_published}{source}\n{snippet}"
redacted_version = redacted_version.replace("Your browser can't play this video.", "")
web_snippets.append(redacted_version)
return "## Search Results\n" + "\n\n".join(web_snippets)
class VisitWebpageTool(Tool):
name = "visit_webpage"
description = (
"Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
)
inputs = {
"url": {
"type": "string",
"description": "The url of the webpage to visit.",
}
}
output_type = "string"
def forward(self, url: str) -> str:
try:
import requests
from markdownify import markdownify
from requests.exceptions import RequestException
from smolagents.utils import truncate_content
except ImportError as e:
raise ImportError(
"You must install packages `markdownify` and `requests` to run this tool: for instance run `pip install markdownify requests`."
) from e
try:
# Send a GET request to the URL with a 20-second timeout
response = requests.get(url, timeout=20)
response.raise_for_status() # Raise an exception for bad status codes
# Convert the HTML content to Markdown
markdown_content = markdownify(response.text).strip()
# Remove multiple line breaks
markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
return truncate_content(markdown_content, 10000)
except requests.exceptions.Timeout:
return "The request timed out. Please try again later or check the URL."
except RequestException as e:
return f"Error fetching the webpage: {str(e)}"
except Exception as e:
return f"An unexpected error occurred: {str(e)}"
class SpeechToTextTool(PipelineTool):
default_checkpoint = "openai/whisper-large-v3-turbo"
description = "This is a tool that transcribes an audio into text. It returns the transcribed text."
name = "transcriber"
inputs = {
"audio": {
"type": "audio",
"description": "The audio to transcribe. Can be a local path, an url, or a tensor.",
}
}
output_type = "string"
def __new__(cls, *args, **kwargs):
from transformers.models.whisper import (
WhisperForConditionalGeneration,
WhisperProcessor,
)
cls.pre_processor_class = WhisperProcessor
cls.model_class = WhisperForConditionalGeneration
return super().__new__(cls, *args, **kwargs)
def encode(self, audio):
audio = AgentAudio(audio).to_raw()
return self.pre_processor(audio, return_tensors="pt")
def forward(self, inputs):
return self.model.generate(inputs["input_features"])
def decode(self, outputs):
return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
TOOL_MAPPING = {
tool_class.name: tool_class
for tool_class in [
PythonInterpreterTool,
DuckDuckGoSearchTool,
VisitWebpageTool,
]
}
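# Editor's illustrative sketch (not part of the original module): TOOL_MAPPING
# resolves a default tool class from its registered name, e.g. the
# "web_search" entry contributed by DuckDuckGoSearchTool above.
def _demo_tool_lookup(tool_name: str = "web_search"):
    tool_class = TOOL_MAPPING[tool_name]
    # Instantiating may require optional extras (e.g. duckduckgo-search)
    return tool_class()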
__all__ = [
"PythonInterpreterTool",
"FinalAnswerTool",
"UserInputTool",
"DuckDuckGoSearchTool",
"GoogleSearchTool",
"VisitWebpageTool",
"SpeechToTextTool",
]
| smolagents/src/smolagents/default_tools.py/0 | {
"file_path": "smolagents/src/smolagents/default_tools.py",
"repo_id": "smolagents",
"token_count": 4416
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from unittest.mock import MagicMock
import pytest
from transformers.testing_utils import get_tests_dir
from smolagents.agent_types import AgentImage, AgentText
from smolagents.agents import (
AgentMaxStepsError,
CodeAgent,
MultiStepAgent,
ToolCall,
ToolCallingAgent,
)
from smolagents.default_tools import PythonInterpreterTool
from smolagents.memory import PlanningStep
from smolagents.models import (
ChatMessage,
ChatMessageToolCall,
ChatMessageToolCallDefinition,
MessageRole,
TransformersModel,
)
from smolagents.tools import tool
from smolagents.utils import BASE_BUILTIN_MODULES
def get_new_path(suffix="") -> str:
directory = tempfile.mkdtemp()
return os.path.join(directory, str(uuid.uuid4()) + suffix)
class FakeToolCallModel:
def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None):
if len(messages) < 3:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_0",
type="function",
function=ChatMessageToolCallDefinition(
name="python_interpreter", arguments={"code": "2*3.6452"}
),
)
],
)
else:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_1",
type="function",
function=ChatMessageToolCallDefinition(name="final_answer", arguments={"answer": "7.2904"}),
)
],
)
class FakeToolCallModelImage:
def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None):
if len(messages) < 3:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_0",
type="function",
function=ChatMessageToolCallDefinition(
name="fake_image_generation_tool",
arguments={"prompt": "An image of a cat"},
),
)
],
)
else:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_1",
type="function",
function=ChatMessageToolCallDefinition(name="final_answer", arguments="image.png"),
)
],
)
class FakeToolCallModelVL:
def __call__(self, messages, tools_to_call_from=None, stop_sequences=None, grammar=None):
if len(messages) < 3:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_0",
type="function",
function=ChatMessageToolCallDefinition(
name="fake_image_understanding_tool",
arguments={
"prompt": "What is in this image?",
"image": "image.png",
},
),
)
],
)
else:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_1",
type="function",
function=ChatMessageToolCallDefinition(name="final_answer", arguments="The image is a cat."),
)
],
)
def fake_code_model(messages, stop_sequences=None, grammar=None) -> str:
prompt = str(messages)
if "special_marker" not in prompt:
return ChatMessage(
role="assistant",
content="""
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
result = 2**3.6452
```<end_code>
""",
)
else: # We're at step 2
return ChatMessage(
role="assistant",
content="""
Thought: I can now answer the initial question
Code:
```py
final_answer(7.2904)
```<end_code>
""",
)
def fake_code_model_error(messages, stop_sequences=None) -> str:
prompt = str(messages)
if "special_marker" not in prompt:
return ChatMessage(
role="assistant",
content="""
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
print("Flag!")
def error_function():
raise ValueError("error")
error_function()
```<end_code>
""",
)
else: # We're at step 2
return ChatMessage(
role="assistant",
content="""
Thought: I faced an error in the previous step.
Code:
```py
final_answer("got an error")
```<end_code>
""",
)
def fake_code_model_syntax_error(messages, stop_sequences=None) -> str:
prompt = str(messages)
if "special_marker" not in prompt:
return ChatMessage(
role="assistant",
content="""
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
a = 2
b = a * 2
print("Failing due to unexpected indent")
print("Ok, calculation done!")
```<end_code>
""",
)
else: # We're at step 2
return ChatMessage(
role="assistant",
content="""
Thought: I can now answer the initial question
Code:
```py
final_answer("got an error")
```<end_code>
""",
)
def fake_code_model_import(messages, stop_sequences=None) -> str:
return ChatMessage(
role="assistant",
content="""
Thought: I can answer the question
Code:
```py
import numpy as np
final_answer("got an error")
```<end_code>
""",
)
def fake_code_functiondef(messages, stop_sequences=None) -> str:
prompt = str(messages)
if "special_marker" not in prompt:
return ChatMessage(
role="assistant",
content="""
Thought: Let's define the function. special_marker
Code:
```py
import numpy as np
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
```<end_code>
""",
)
else: # We're at step 2
return ChatMessage(
role="assistant",
content="""
Thought: I can now answer the initial question
Code:
```py
x, w = [0, 1, 2, 3, 4, 5], 2
res = moving_average(x, w)
final_answer(res)
```<end_code>
""",
)
def fake_code_model_single_step(messages, stop_sequences=None, grammar=None) -> str:
return ChatMessage(
role="assistant",
content="""
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
result = python_interpreter(code="2*3.6452")
final_answer(result)
```
""",
)
def fake_code_model_no_return(messages, stop_sequences=None, grammar=None) -> str:
return ChatMessage(
role="assistant",
content="""
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
result = python_interpreter(code="2*3.6452")
print(result)
```
""",
)
class AgentTests(unittest.TestCase):
def test_fake_toolcalling_agent(self):
agent = ToolCallingAgent(tools=[PythonInterpreterTool()], model=FakeToolCallModel())
output = agent.run("What is 2 multiplied by 3.6452?")
assert isinstance(output, str)
assert "7.2904" in output
assert agent.memory.steps[0].task == "What is 2 multiplied by 3.6452?"
assert "7.2904" in agent.memory.steps[1].observations
assert agent.memory.steps[2].model_output is None
def test_toolcalling_agent_handles_image_tool_outputs(self):
from PIL import Image
@tool
def fake_image_generation_tool(prompt: str) -> Image.Image:
"""Tool that generates an image.
Args:
prompt: The prompt
"""
return Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png")
agent = ToolCallingAgent(tools=[fake_image_generation_tool], model=FakeToolCallModelImage())
output = agent.run("Make me an image.")
assert isinstance(output, AgentImage)
assert isinstance(agent.state["image.png"], Image.Image)
def test_toolcalling_agent_handles_image_inputs(self):
from PIL import Image
image = Image.open(Path(get_tests_dir("fixtures")) / "000000039769.png") # dummy input
@tool
def fake_image_understanding_tool(prompt: str, image: Image.Image) -> str:
"""Tool that creates a caption for an image.
Args:
prompt: The prompt
image: The image
"""
return "The image is a cat."
agent = ToolCallingAgent(tools=[fake_image_understanding_tool], model=FakeToolCallModelVL())
output = agent.run("Caption this image.", images=[image])
assert output == "The image is a cat."
def test_fake_code_agent(self):
agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model)
output = agent.run("What is 2 multiplied by 3.6452?")
assert isinstance(output, float)
assert output == 7.2904
assert agent.memory.steps[0].task == "What is 2 multiplied by 3.6452?"
assert agent.memory.steps[2].tool_calls == [
ToolCall(name="python_interpreter", arguments="final_answer(7.2904)", id="call_2")
]
def test_additional_args_added_to_task(self):
agent = CodeAgent(tools=[], model=fake_code_model)
agent.run(
"What is 2 multiplied by 3.6452?",
additional_args={"instruction": "Remember this."},
)
assert "Remember this" in agent.task
assert "Remember this" in str(agent.input_messages)
def test_reset_conversations(self):
agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model)
output = agent.run("What is 2 multiplied by 3.6452?", reset=True)
assert output == 7.2904
assert len(agent.memory.steps) == 3
output = agent.run("What is 2 multiplied by 3.6452?", reset=False)
assert output == 7.2904
assert len(agent.memory.steps) == 5
output = agent.run("What is 2 multiplied by 3.6452?", reset=True)
assert output == 7.2904
assert len(agent.memory.steps) == 3
def test_code_agent_code_errors_show_offending_line_and_error(self):
agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_error)
output = agent.run("What is 2 multiplied by 3.6452?")
assert isinstance(output, AgentText)
assert output == "got an error"
assert "Code execution failed at line 'error_function()'" in str(agent.memory.steps[1].error)
assert "ValueError" in str(agent.memory.steps)
def test_code_agent_code_error_saves_previous_print_outputs(self):
agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_error)
agent.run("What is 2 multiplied by 3.6452?")
assert "Flag!" in str(agent.memory.steps[1].observations)
def test_code_agent_syntax_error_show_offending_lines(self):
agent = CodeAgent(tools=[PythonInterpreterTool()], model=fake_code_model_syntax_error)
output = agent.run("What is 2 multiplied by 3.6452?")
assert isinstance(output, AgentText)
assert output == "got an error"
assert ' print("Failing due to unexpected indent")' in str(agent.memory.steps)
def test_setup_agent_with_empty_toolbox(self):
ToolCallingAgent(model=FakeToolCallModel(), tools=[])
def test_fails_max_steps(self):
agent = CodeAgent(
tools=[PythonInterpreterTool()],
model=fake_code_model_no_return, # use this callable because it never ends
max_steps=5,
)
answer = agent.run("What is 2 multiplied by 3.6452?")
assert len(agent.memory.steps) == 7 # Task step + 5 action steps + Final answer
assert type(agent.memory.steps[-1].error) is AgentMaxStepsError
assert isinstance(answer, str)
def test_tool_descriptions_get_baked_in_system_prompt(self):
tool = PythonInterpreterTool()
tool.name = "fake_tool_name"
tool.description = "fake_tool_description"
agent = CodeAgent(tools=[tool], model=fake_code_model)
agent.run("Empty task")
assert tool.name in agent.system_prompt
assert tool.description in agent.system_prompt
def test_module_imports_get_baked_in_system_prompt(self):
agent = CodeAgent(tools=[], model=fake_code_model)
agent.run("Empty task")
for module in BASE_BUILTIN_MODULES:
assert module in agent.system_prompt
def test_init_agent_with_different_toolsets(self):
toolset_1 = []
agent = CodeAgent(tools=toolset_1, model=fake_code_model)
assert len(agent.tools) == 1 # when no tools are provided, only the final_answer tool is added by default
toolset_2 = [PythonInterpreterTool(), PythonInterpreterTool()]
agent = CodeAgent(tools=toolset_2, model=fake_code_model)
assert (
len(agent.tools) == 2
) # deduplication of tools, so only one python_interpreter tool is added in addition to final_answer
# check that python_interpreter base tool does not get added to CodeAgent
agent = CodeAgent(tools=[], model=fake_code_model, add_base_tools=True)
assert len(agent.tools) == 3 # added final_answer tool + search + visit_webpage
# check that python_interpreter base tool gets added to ToolCallingAgent
agent = ToolCallingAgent(tools=[], model=fake_code_model, add_base_tools=True)
        assert len(agent.tools) == 4  # added final_answer tool + python_interpreter + search + visit_webpage
def test_function_persistence_across_steps(self):
agent = CodeAgent(
tools=[],
model=fake_code_functiondef,
max_steps=2,
additional_authorized_imports=["numpy"],
)
res = agent.run("ok")
assert res[0] == 0.5
def test_init_managed_agent(self):
agent = CodeAgent(tools=[], model=fake_code_functiondef, name="managed_agent", description="Empty")
assert agent.name == "managed_agent"
assert agent.description == "Empty"
def test_agent_description_gets_correctly_inserted_in_system_prompt(self):
managed_agent = CodeAgent(tools=[], model=fake_code_functiondef, name="managed_agent", description="Empty")
manager_agent = CodeAgent(
tools=[],
model=fake_code_functiondef,
managed_agents=[managed_agent],
)
assert "You can also give tasks to team members." not in managed_agent.system_prompt
assert "{{managed_agents_descriptions}}" not in managed_agent.system_prompt
assert "You can also give tasks to team members." in manager_agent.system_prompt
def test_code_agent_missing_import_triggers_advice_in_error_log(self):
# Set explicit verbosity level to 1 to override the default verbosity level of -1 set in CI fixture
agent = CodeAgent(tools=[], model=fake_code_model_import, verbosity_level=1)
with agent.logger.console.capture() as capture:
agent.run("Count to 3")
str_output = capture.get()
assert "`additional_authorized_imports`" in str_output.replace("\n", "")
def test_multiagents(self):
class FakeModelMultiagentsManagerAgent:
model_id = "fake_model"
def __call__(
self,
messages,
stop_sequences=None,
grammar=None,
tools_to_call_from=None,
):
if tools_to_call_from is not None:
if len(messages) < 3:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_0",
type="function",
function=ChatMessageToolCallDefinition(
name="search_agent",
arguments="Who is the current US president?",
),
)
],
)
else:
assert "Report on the current US president" in str(messages)
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_0",
type="function",
function=ChatMessageToolCallDefinition(
name="final_answer", arguments="Final report."
),
)
],
)
else:
if len(messages) < 3:
return ChatMessage(
role="assistant",
content="""
Thought: Let's call our search agent.
Code:
```py
result = search_agent("Who is the current US president?")
```<end_code>
""",
)
else:
assert "Report on the current US president" in str(messages)
return ChatMessage(
role="assistant",
content="""
Thought: Let's return the report.
Code:
```py
final_answer("Final report.")
```<end_code>
""",
)
manager_model = FakeModelMultiagentsManagerAgent()
class FakeModelMultiagentsManagedAgent:
model_id = "fake_model"
def __call__(
self,
messages,
tools_to_call_from=None,
stop_sequences=None,
grammar=None,
):
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="call_0",
type="function",
function=ChatMessageToolCallDefinition(
name="final_answer",
arguments="Report on the current US president",
),
)
],
)
managed_model = FakeModelMultiagentsManagedAgent()
web_agent = ToolCallingAgent(
tools=[],
model=managed_model,
max_steps=10,
name="search_agent",
description="Runs web searches for you. Give it your request as an argument. Make the request as detailed as needed, you can ask for thorough reports",
)
manager_code_agent = CodeAgent(
tools=[],
model=manager_model,
managed_agents=[web_agent],
additional_authorized_imports=["time", "numpy", "pandas"],
)
report = manager_code_agent.run("Fake question.")
assert report == "Final report."
manager_toolcalling_agent = ToolCallingAgent(
tools=[],
model=manager_model,
managed_agents=[web_agent],
)
report = manager_toolcalling_agent.run("Fake question.")
assert report == "Final report."
# Test that visualization works
manager_code_agent.visualize()
def test_code_nontrivial_final_answer_works(self):
def fake_code_model_final_answer(messages, stop_sequences=None, grammar=None):
return ChatMessage(
role="assistant",
content="""Code:
```py
def nested_answer():
final_answer("Correct!")
nested_answer()
```<end_code>""",
)
agent = CodeAgent(tools=[], model=fake_code_model_final_answer)
output = agent.run("Count to 3")
assert output == "Correct!"
def test_transformers_toolcalling_agent(self):
@tool
def weather_api(location: str, celsius: bool = False) -> str:
"""
Gets the weather in the next days at given location.
Secretly this tool does not care about the location, it hates the weather everywhere.
Args:
location: the location
celsius: the temperature type
"""
return "The weather is UNGODLY with torrential rains and temperatures below -10°C"
model = TransformersModel(
model_id="HuggingFaceTB/SmolLM2-360M-Instruct",
max_new_tokens=100,
device_map="auto",
do_sample=False,
)
agent = ToolCallingAgent(model=model, tools=[weather_api], max_steps=1)
agent.run("What's the weather in Paris?")
assert agent.memory.steps[0].task == "What's the weather in Paris?"
assert agent.memory.steps[1].tool_calls[0].name == "weather_api"
step_memory_dict = agent.memory.get_succinct_steps()[1]
assert step_memory_dict["model_output_message"].tool_calls[0].function.name == "weather_api"
assert step_memory_dict["model_output_message"].raw["completion_kwargs"]["max_new_tokens"] == 100
assert "model_input_messages" in agent.memory.get_full_steps()[1]
def test_final_answer_checks(self):
def check_always_fails(final_answer, agent_memory):
assert False, "Error raised in check"
agent = CodeAgent(model=fake_code_model, tools=[], final_answer_checks=[check_always_fails])
agent.run("Dummy task.")
assert "Error raised in check" in str(agent.write_memory_to_messages())
class TestMultiStepAgent:
def test_instantiation_disables_logging_to_terminal(self):
fake_model = MagicMock()
agent = MultiStepAgent(tools=[], model=fake_model)
assert agent.logger.level == -1, "logging to terminal should be disabled for testing using a fixture"
def test_instantiation_with_prompt_templates(self, prompt_templates):
agent = MultiStepAgent(tools=[], model=MagicMock(), prompt_templates=prompt_templates)
assert agent.prompt_templates == prompt_templates
assert agent.prompt_templates["system_prompt"] == "This is a test system prompt."
assert "managed_agent" in agent.prompt_templates
assert agent.prompt_templates["managed_agent"]["task"] == "Task for {{name}}: {{task}}"
assert agent.prompt_templates["managed_agent"]["report"] == "Report for {{name}}: {{final_answer}}"
def test_step_number(self):
fake_model = MagicMock()
fake_model.last_input_token_count = 10
fake_model.last_output_token_count = 20
max_steps = 2
agent = MultiStepAgent(tools=[], model=fake_model, max_steps=max_steps)
assert hasattr(agent, "step_number"), "step_number attribute should be defined"
assert agent.step_number == 0, "step_number should be initialized to 0"
agent.run("Test task")
assert hasattr(agent, "step_number"), "step_number attribute should be defined"
assert agent.step_number == max_steps + 1, "step_number should be max_steps + 1 after run method is called"
def test_planning_step_first_step(self):
fake_model = MagicMock()
agent = CodeAgent(
tools=[],
model=fake_model,
)
task = "Test task"
agent.planning_step(task, is_first_step=True, step=0)
assert len(agent.memory.steps) == 1
planning_step = agent.memory.steps[0]
assert isinstance(planning_step, PlanningStep)
messages = planning_step.model_input_messages
assert isinstance(messages, list)
assert len(messages) == 1
for message in messages:
assert isinstance(message, dict)
assert "role" in message
assert "content" in message
assert isinstance(message["role"], MessageRole)
assert isinstance(message["content"], list)
assert len(message["content"]) == 1
for content in message["content"]:
assert isinstance(content, dict)
assert "type" in content
assert "text" in content
# Test calls to model
assert len(fake_model.call_args_list) == 2
for call_args in fake_model.call_args_list:
assert len(call_args.args) == 1
messages = call_args.args[0]
assert isinstance(messages, list)
assert len(messages) == 1
for message in messages:
assert isinstance(message, dict)
assert "role" in message
assert "content" in message
assert isinstance(message["role"], MessageRole)
assert isinstance(message["content"], list)
assert len(message["content"]) == 1
for content in message["content"]:
assert isinstance(content, dict)
assert "type" in content
assert "text" in content
@pytest.fixture
def prompt_templates():
return {
"system_prompt": "This is a test system prompt.",
"managed_agent": {"task": "Task for {{name}}: {{task}}", "report": "Report for {{name}}: {{final_answer}}"},
}
| smolagents/tests/test_agents.py/0 | {
"file_path": "smolagents/tests/test_agents.py",
"repo_id": "smolagents",
"token_count": 12554
} |
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
exclude: crate-hashes.json
- id: trailing-whitespace
exclude: docs/source/reference/launcher.md
- repo: https://github.com/psf/black
rev: 24.2.0
hooks:
- id: black
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
- id: cargo-check
- id: fmt
- id: clippy
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.3.0
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
| text-generation-inference/.pre-commit-config.yaml/0 | {
"file_path": "text-generation-inference/.pre-commit-config.yaml",
"repo_id": "text-generation-inference",
"token_count": 314
} |
{
"__inputs": [
{
"name": "DS_PROMETHEUS_EKS API INFERENCE PROD",
"label": "Prometheus EKS API Inference Prod",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__elements": {},
"__requires": [
{
"type": "panel",
"id": "gauge",
"name": "Gauge",
"version": ""
},
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "10.0.2"
},
{
"type": "panel",
"id": "heatmap",
"name": "Heatmap",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 2,
"id": 551,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"fieldMinMax": false,
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 1000
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 0
},
"id": 49,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "(histogram_quantile(0.5, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m]))) * 1000) > 0",
"hide": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "(histogram_quantile(0.5, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m]))) * 1000) > 0",
"hide": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "C"
},
{
"datasource": {
"name": "Expression",
"type": "__expr__",
"uid": "__expr__"
},
"expression": "$B + $C",
"hide": false,
"refId": "D",
"type": "math"
}
],
"title": "Time to first token",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 9,
"y": 0
},
"id": 44,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "(histogram_quantile(0.5, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m]))) * 1000)>0",
"instant": false,
"range": true,
"refId": "A"
}
],
"title": "Decode per-token latency",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 7,
"x": 17,
"y": 0
},
"id": 45,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum((rate(tgi_request_generated_tokens_sum{container=\"$service\"}[10m]) / rate(tgi_request_generated_tokens_count{container=\"$service\"}[10m]))>0)",
"instant": false,
"range": true,
"refId": "A"
}
],
"title": "Throughput (generated tok/s)",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 7
},
"id": 48,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Number of tokens per prompt",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 7
},
"id": 30,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_generated_tokens_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_generated_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_generated_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Number of generated tokens per request",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 15
},
"id": 20,
"panels": [],
"title": "General",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 16
},
"id": 4,
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(increase(tgi_request_success{container=\"$service\"}[1m]))",
"legendFormat": "Success",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(increase(tgi_request_failure{container=\"$service\"}[1m])) by (err)",
"hide": false,
"legendFormat": "Error: {{err}}",
"range": true,
"refId": "B"
}
],
"title": "Requests",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 13,
"w": 9,
"x": 6,
"y": 16
},
"id": 6,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Mean Time Per Token quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 13,
"w": 9,
"x": 15,
"y": 16
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 13,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{ le }}",
"range": true,
"refId": "A"
}
],
"title": "Mean Time Per Token",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "orange",
"value": 70
},
{
"color": "red",
"value": 85
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 3,
"x": 0,
"y": 24
},
"id": 18,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "count(tgi_request_count{container=\"$service\"})",
"legendFormat": "Replicas",
"range": true,
"refId": "A"
}
],
"title": "Number of replicas",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "orange",
"value": 70
},
{
"color": "red",
"value": 85
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 3,
"x": 3,
"y": 24
},
"id": 32,
"options": {
"minVizHeight": 75,
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sizing": "auto"
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(tgi_queue_size{container=\"$service\"})",
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Queue Size",
"type": "gauge"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 29
},
"id": 26,
"panels": [],
"title": "Batching",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 50,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 0,
"y": 30
},
"id": 29,
"maxDataPoints": 40,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "avg(tgi_batch_current_max_tokens{container=\"$service\"})",
"legendFormat": "{{ pod }}",
"range": true,
"refId": "A"
}
],
"title": "Max tokens per batch",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 4,
"x": 6,
"y": 30
},
"id": 33,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_skipped_tokens_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_skipped_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_skipped_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Speculated Tokens",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 5,
"x": 10,
"y": 30
},
"id": 46,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Prompt Tokens",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 9,
"x": 15,
"y": 30
},
"id": 8,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_duration_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Latency quantiles",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 50,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 35
},
"id": 27,
"maxDataPoints": 40,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "avg(tgi_batch_current_size{container=\"$service\"})",
"legendFormat": "{{ pod }}",
"range": true,
"refId": "A"
}
],
"title": "Batch Size",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 0,
"y": 39
},
"id": 28,
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(increase(tgi_batch_concat{container=\"$service\"}[1m])) by (reason)",
"hide": false,
"legendFormat": "Reason: {{ reason }}",
"range": true,
"refId": "B"
}
],
"title": "Concatenates",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 9,
"x": 6,
"y": 39
},
"id": 31,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Queue quantiles",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 48
},
"id": 22,
"panels": [],
"title": "Prefill",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 49
},
"id": 7,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Prefill Quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 49
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 14,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{ le }}",
"range": true,
"refId": "A"
}
],
"title": "Prefill Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 60
},
"id": 24,
"panels": [],
"title": "Decode",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 61
},
"id": 11,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Decode quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 61
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 15,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{ le }}",
"range": true,
"refId": "A"
}
],
"title": "Decode Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 72
},
"id": 43,
"panels": [],
"title": "Debug",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 0,
"y": 73
},
"id": 38,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Forward quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 6,
"y": 73
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 35,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{ le }}",
"range": true,
"refId": "A"
}
],
"title": "Forward Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 12,
"y": 73
},
"id": 34,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Token Decode quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 18,
"y": 73
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 40,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{ le }}",
"range": true,
"refId": "A"
}
],
"title": "Token Decode Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 0,
"y": 84
},
"id": 42,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Filter Batch quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 6,
"y": 84
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 39,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{ le }}",
"range": true,
"refId": "A"
}
],
"title": "Filter Batch Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 12,
"y": 84
},
"id": 36,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Batch Concat quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 18,
"y": 84
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 41,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{ le }}",
"range": true,
"refId": "A"
}
],
"title": "Batch Concat latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
}
],
"refresh": "",
"schemaVersion": 39,
"tags": [],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "gpu-txt-gen-cohereforai-c4ai-command-r-plu-ba7f1",
"value": "gpu-txt-gen-cohereforai-c4ai-command-r-plu-ba7f1"
},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"definition": "label_values(tgi_request_count, container)",
"hide": 0,
"includeAll": false,
"multi": false,
"name": "service",
"options": [],
"query": {
"query": "label_values(tgi_request_count, container)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
}
]
},
"time": {
"from": "now-30m",
"to": "now-30s"
},
"timepicker": {
"nowDelay": "30s"
},
"timezone": "",
"title": "Text Generation Inference",
"uid": "RHSk7EL4kdqsd",
"version": 12,
"weekStart": ""
}
| text-generation-inference/assets/tgi_grafana.json/0 | {
"file_path": "text-generation-inference/assets/tgi_grafana.json",
"repo_id": "text-generation-inference",
"token_count": 62818
} |
use cxx_build::CFG;
use pkg_config;
use std::env;
use std::env::consts::ARCH;
use std::path::{absolute, PathBuf};
use std::sync::LazyLock;
const ADDITIONAL_BACKEND_LINK_LIBRARIES: [&str; 1] = ["spdlog"];
const CUDA_ARCH_LIST: Option<&str> = option_env!("CUDA_ARCH_LIST");
const CUDA_REQUIRED_VERSION: &str = "12.8";
const MPI_REQUIRED_VERSION: &str = "4.1";
const INSTALL_PREFIX: Option<&str> = option_env!("CMAKE_INSTALL_PREFIX");
const TENSORRT_ROOT_DIR: Option<&str> = option_env!("TENSORRT_ROOT_DIR");
const NCCL_ROOT_DIR: Option<&str> = option_env!("NCCL_ROOT_DIR");
static IS_GHA_BUILD: LazyLock<bool> = LazyLock::new(|| {
option_env!("SCCACHE_GHA_ENABLED").map_or(false, |value| match value.to_lowercase().as_str() {
"on" => true,
"true" => true,
"1" => true,
_ => false,
})
});
// Dependencies
const BACKEND_DEPS: &str = "tgi_trtllm_backend_impl";
const CUDA_TRANSITIVE_DEPS: [&str; 4] = ["cuda", "cudart", "cublas", "nvidia-ml"];
const TENSORRT_LLM_TRANSITIVE_DEPS: [(&str, &str); 5] = [
("dylib", "tensorrt_llm"),
("dylib", "tensorrt_llm_nvrtc_wrapper"),
("dylib", "nvinfer_plugin_tensorrt_llm"),
("dylib", "decoder_attention_0"),
("dylib", "decoder_attention_1"),
];
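// Probe a library via pkg-config: try the bare name first, then fall back to "<name>-<version>" and panic if neither is found.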
macro_rules! probe {
($name: expr, $version: expr) => {
if let Err(_) = pkg_config::probe_library($name) {
pkg_config::probe_library(&format!("{}-{}", $name, $version))
.expect(&format!("Failed to locate {}", $name));
}
};
}
fn get_compiler_flag(
switch: bool,
true_case: &'static str,
false_case: &'static str,
) -> &'static str {
match switch {
true => true_case,
false => false_case,
}
}
fn get_library_architecture() -> &'static str {
let os = env::var("CARGO_CFG_TARGET_OS").unwrap();
let arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
let env = env::var("CARGO_CFG_TARGET_ENV").unwrap();
match os.as_str() {
"linux" => {
if env != "gnu" {
panic!("unsupported linux ABI {env}, only 'gnu' is supported")
}
match arch.as_str() {
"x86_64" => "x86_64-linux-gnu",
"aarch64" => "aarch64-linux-gnu",
_ => panic!("unsupported linux architecture {arch}"),
}
}
"windows" => {
if env != "msvc" {
panic!("unsupported windows ABI {env}, only 'msvc' is supported")
}
match arch.as_str() {
"x86_64" => "x86_64-windows-msvc",
_ => panic!("unsupported windows architecture {arch}"),
}
}
_ => panic!("unsupported OS {os}"),
}
}
fn build_backend(is_debug: bool, opt_level: &str, out_dir: &PathBuf) -> (PathBuf, PathBuf) {
// Build the backend implementation through CMake
let install_path = INSTALL_PREFIX.unwrap_or("/usr/local/tgi");
let tensorrt_path = TENSORRT_ROOT_DIR.unwrap_or("/usr/local/tensorrt");
let cuda_arch_list = CUDA_ARCH_LIST.unwrap_or("75-real;80-real;86-real;89-real;90-real");
let mut install_path = PathBuf::from(install_path);
if !install_path.is_absolute() {
install_path = absolute(out_dir).expect("cannot happen").join(install_path);
}
let mut config = cmake::Config::new(".");
config
.uses_cxx11()
.generator("Ninja")
.profile(match is_debug {
true => "Debug",
false => "Release",
})
.env("OPT_LEVEL", opt_level)
.define("CMAKE_INSTALL_PREFIX", &install_path)
.define("CMAKE_CUDA_COMPILER", "/usr/local/cuda/bin/nvcc")
.define("CMAKE_LIBRARY_ARCHITECTURE", get_library_architecture())
.define("TGI_TRTLLM_BACKEND_TARGET_CUDA_ARCH_LIST", cuda_arch_list)
.define(
"TGI_TRTLLM_BACKEND_DEBUG",
get_compiler_flag(is_debug, "ON", "OFF"),
)
.define("TGI_TRTLLM_BACKEND_TRT_ROOT", tensorrt_path);
if is_debug || *IS_GHA_BUILD {
config.define("TGI_TRTLLM_BACKEND_BUILD_TESTS", "ON");
}
if option_env!("USE_LLD_LINKER").is_some() {
println!("cargo:warning=Using lld linker");
config.define("TGI_TRTLLM_BACKEND_BUILD_USE_LLD", "ON");
}
if (is_debug && option_env!("ENABLE_ASAN").is_some()) || *IS_GHA_BUILD {
println!("cargo:warning=Enabling Address Sanitizer");
config.define("TGI_TRTLLM_BACKEND_ENABLE_ASAN", "ON");
}
if (is_debug && option_env!("ENABLE_UBSAN").is_some()) || *IS_GHA_BUILD {
println!("cargo:warning=Enabling Undefined Sanitizer");
config.define("TGI_TRTLLM_BACKEND_ENABLE_UBSAN", "ON");
}
if let Some(nvcc_host_compiler) = option_env!("CMAKE_CUDA_HOST_COMPILER") {
config.define("CMAKE_CUDA_HOST_COMPILER", nvcc_host_compiler);
}
if let Some(wrapper) = option_env!("RUSTC_WRAPPER") {
println!("cargo:warning=Using caching tool: {wrapper}");
config.define("CMAKE_C_COMPILER_LAUNCHER", wrapper);
config.define("CMAKE_CXX_COMPILER_LAUNCHER", wrapper);
config.define("CMAKE_CUDA_COMPILER_LAUNCHER", wrapper);
}
// Allow to override which Python to use ...
if let Some(python3) = option_env!("Python3_EXECUTABLE") {
config.define("Python3_EXECUTABLE", python3);
}
config.build();
// Additional transitive CMake dependencies
let deps_folder = out_dir.join("build").join("_deps");
for dependency in ADDITIONAL_BACKEND_LINK_LIBRARIES {
let dep_name = match is_debug {
true => format!("{}d", dependency),
false => String::from(dependency),
};
let dep_path = deps_folder.join(format!("{}-build", dependency));
println!("cargo:rustc-link-search={}", dep_path.display());
println!("cargo:rustc-link-lib=static={}", dep_name);
}
// Emit linkage information from the artifacts we just built
for path in ["lib", "lib64"] {
let install_lib_path = install_path.join(path);
println!(
r"cargo:warning=Adding link search path: {}",
install_lib_path.display()
);
println!(r"cargo:rustc-link-search={}", install_lib_path.display());
}
(PathBuf::from(install_path), deps_folder)
}
fn build_ffi_layer(deps_folder: &PathBuf, is_debug: bool) {
CFG.include_prefix = "backends/trtllm";
cxx_build::bridge("src/lib.rs")
.static_flag(true)
.std("c++23")
.include(deps_folder.join("spdlog-src").join("include"))
.include(deps_folder.join("json-src").join("include"))
.include(deps_folder.join("trtllm-src").join("cpp").join("include"))
.include("/usr/local/cuda/include")
.include("/usr/local/tensorrt/include")
.include("csrc/")
.file("csrc/ffi.hpp")
.define(
"TGI_TRTLLM_BACKEND_DEBUG",
get_compiler_flag(is_debug, "ON", "OFF"),
)
.compile("tgi_trtllm_backend");
println!("cargo:rerun-if-changed=CMakeLists.txt");
println!("cargo:rerun-if-changed=cmake/trtllm.cmake");
println!("cargo:rerun-if-changed=cmake/json.cmake");
println!("cargo:rerun-if-changed=cmake/spdlog.cmake");
println!("cargo:rerun-if-changed=csrc/backend.hpp");
println!("cargo:rerun-if-changed=csrc/backend.cpp");
println!("cargo:rerun-if-changed=csrc/hardware.hpp");
println!("cargo:rerun-if-changed=csrc/ffi.hpp");
}
fn main() {
// Misc variables
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
let build_profile = env::var("PROFILE").unwrap();
let (is_debug, opt_level) = match build_profile.as_ref() {
"debug" => (true, "0"),
"dev" => (true, "0"),
_ => (false, "3"),
};
// Build the backend
let (_backend_path, deps_folder) = build_backend(is_debug, opt_level, &out_dir);
// Build the FFI layer calling the backend above
build_ffi_layer(&deps_folder, is_debug);
// Emit linkage search path
probe!("ompi", MPI_REQUIRED_VERSION);
// Probe CUDA & co. with pkg-config
CUDA_TRANSITIVE_DEPS.iter().for_each(|name| {
probe!(name, CUDA_REQUIRED_VERSION);
});
// NCCL is slightly trickier because it might not have a pkgconfig installed
let nccl_library_path_default = format!("/usr/local/{}-linux-gnu", ARCH);
let nccl_library_path = NCCL_ROOT_DIR.unwrap_or(&nccl_library_path_default);
println!(r"cargo:rustc-link-search=native={}", nccl_library_path);
println!("cargo:rustc-link-lib=dylib=nccl");
// TensorRT
let tensort_library_path = TENSORRT_ROOT_DIR.unwrap_or("/usr/local/tensorrt/lib");
println!(r"cargo:rustc-link-search=native={}", tensort_library_path);
println!("cargo:rustc-link-lib=dylib=nvinfer");
// TensorRT-LLM
TENSORRT_LLM_TRANSITIVE_DEPS
.iter()
.for_each(|(link_type, name)| {
println!("cargo:rustc-link-lib={}={}", link_type, name);
});
// Backend
println!("cargo:rustc-link-lib=static={}", &BACKEND_DEPS);
}
| text-generation-inference/backends/trtllm/build.rs/0 | {
"file_path": "text-generation-inference/backends/trtllm/build.rs",
"repo_id": "text-generation-inference",
"token_count": 4237
} |
//
// Created by mfuntowicz on 12/3/24.
//
#include <catch2/catch_all.hpp>
#include <nlohmann/json.hpp>
#include <tensorrt_llm/executor/executor.h>
#include "backend.hpp"
using namespace huggingface::tgi::backends::trtllm;
TEST_CASE("parse generation_config.json all set", "[generation_config_t]")
{
const json config_j = {{"temperature", 0.6},
{"top_p", 0.95},
{"eos_token_id", {1, 2, 3}}};
const auto generation_config = generation_config_t(config_j);
REQUIRE_THAT(generation_config.temperature, Catch::Matchers::WithinAbs(0.6, 1e-6));
REQUIRE_THAT(generation_config.top_p, Catch::Matchers::WithinAbs(0.95, 1e-6));
// Stop words
REQUIRE_FALSE(generation_config.stop_words.empty());
REQUIRE(generation_config.stop_words.size() == config_j["/eos_token_id"_json_pointer].size());
for (auto [lhs, rhs]: std::views::zip(generation_config.stop_words, std::list<std::vector<int32_t>>{{1},
{2},
{3}})) {
// Currently we do not support multi-tokens stop words
REQUIRE(lhs.size() == 1);
REQUIRE(rhs.size() == 1);
REQUIRE_THAT(lhs, Catch::Matchers::UnorderedEquals(rhs));
}
}
TEST_CASE("parse generation_config.json default", "[generation_config_t]")
{
const json config_j = {{"eos_token_id", {1, 2, 3}}};
const auto generation_config = generation_config_t(config_j);
REQUIRE_THAT(generation_config.temperature, Catch::Matchers::WithinAbs(1.0, 1e-6));
REQUIRE_THAT(generation_config.top_p, Catch::Matchers::WithinAbs(1.0, 1e-6));
REQUIRE_FALSE(generation_config.stop_words.empty());
REQUIRE(generation_config.stop_words.size() == config_j["/eos_token_id"_json_pointer].size());
for (auto [lhs, rhs]: std::views::zip(generation_config.stop_words, std::list<std::vector<int32_t>>{{1},
{2},
{3}})) {
// Currently we do not support multi-tokens stop words
REQUIRE(lhs.size() == 1);
REQUIRE(rhs.size() == 1);
REQUIRE_THAT(lhs, Catch::Matchers::UnorderedEquals(rhs));
}
}
TEST_CASE("parse generation_config.json empty", "[generation_config_t]")
{
const json config_j = {{"eos_token_id", {}}};
const auto generation_config = generation_config_t(config_j);
REQUIRE_THAT(generation_config.temperature, Catch::Matchers::WithinAbs(1.0, 1e-6));
REQUIRE_THAT(generation_config.top_p, Catch::Matchers::WithinAbs(1.0, 1e-6));
REQUIRE(generation_config.stop_words.empty());
const json config_j2 = {};
const auto generation_config2 = generation_config_t(config_j2);
REQUIRE_THAT(generation_config2.temperature, Catch::Matchers::WithinAbs(1.0, 1e-6));
REQUIRE_THAT(generation_config2.top_p, Catch::Matchers::WithinAbs(1.0, 1e-6));
REQUIRE(generation_config2.stop_words.empty());
}
TEST_CASE("parallel_config single", "[backend_workspace_t]")
{
// Generate temporary folder
const auto tmp_p = std::filesystem::temp_directory_path();
const auto config_p = tmp_p / "config.json";
const auto generation_config_p = tmp_p / "generation_config.json";
// Generate content
std::ofstream o_config(config_p);
o_config << R"({"pretrained_config": {"mapping": {"world_size": 2}}})"_json;
o_config.close();
std::ofstream o_generation_config(generation_config_p);
o_generation_config << R"({"eos_token_id": []})"_json;
o_generation_config.close();
const auto workspace = backend_workspace_t(tmp_p.generic_string(), tmp_p.generic_string());
const auto parallel = workspace.parallel_config();
REQUIRE(parallel.getCommunicationMode() == tle::CommunicationMode::kORCHESTRATOR);
REQUIRE(parallel.getCommunicationType() == tle::CommunicationType::kMPI);
std::filesystem::remove(config_p);
std::filesystem::remove(generation_config_p);
}
TEST_CASE("parallel_config multi", "[backend_workspace_t]")
{
// Generate temporary folder
const auto tmp_p = std::filesystem::temp_directory_path();
const auto config_p = tmp_p / "config.json";
const auto generation_config_p = tmp_p / "generation_config.json";
// Generate content
std::ofstream o_config(config_p);
o_config << R"({"pretrained_config": {"mapping": {"world_size": 1}}})"_json;
o_config.close();
std::ofstream o_generation_config(generation_config_p);
o_generation_config << R"({"eos_token_id": []})"_json;
o_generation_config.close();
const auto workspace = backend_workspace_t(tmp_p.generic_string(), tmp_p.generic_string());
const auto parallel = workspace.parallel_config();
REQUIRE(parallel.getCommunicationMode() == tle::CommunicationMode::kLEADER);
REQUIRE(parallel.getCommunicationType() == tle::CommunicationType::kMPI);
std::filesystem::remove(config_p);
std::filesystem::remove(generation_config_p);
}
TEST_CASE("executor_config", "[backend_workspace_t]")
{
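    // No assertions yet: backend_workspace_t::executor_config() is currently not exercised by this test.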
}
TEST_CASE("sampling_params_t to tle::SamplingConfig", "[backend_t]")
{
const sampling_params_t params = {40, 0.95, 0.9, 1.0, 0.6, 2014};
const auto config = static_cast<tle::SamplingConfig>(params);
REQUIRE(config.getTopK().has_value());
REQUIRE(config.getTopK().value() == params.top_k);
REQUIRE(config.getSeed().has_value());
REQUIRE(config.getSeed().value() == params.seed);
REQUIRE(config.getTopP().has_value());
REQUIRE_THAT(*config.getTopP(), Catch::Matchers::WithinAbs(params.top_p, 1e-6f));
REQUIRE(config.getRepetitionPenalty().has_value());
REQUIRE_THAT(*config.getRepetitionPenalty(), Catch::Matchers::WithinAbs(params.repetition_penalty, 1e-6f));
REQUIRE(config.getFrequencyPenalty().has_value());
REQUIRE_THAT(*config.getFrequencyPenalty(), Catch::Matchers::WithinAbs(params.frequency_penalty, 1e-6f));
REQUIRE(config.getTemperature().has_value());
REQUIRE_THAT(*config.getTemperature(), Catch::Matchers::WithinAbs(params.temperature, 1e-6f));
}
| text-generation-inference/backends/trtllm/tests/test_backend.cpp/0 | {
"file_path": "text-generation-inference/backends/trtllm/tests/test_backend.cpp",
"repo_id": "text-generation-inference",
"token_count": 2696
} |
Documentation available at: https://huggingface.co/docs/text-generation-inference
## Release
When making a release, please update the latest version in the documentation with:
```
export OLD_VERSION="2\.0\.3"
export NEW_VERSION="2\.0\.4"
find . -name '*.md' -exec sed -i -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \;
```
| text-generation-inference/docs/README.md/0 | {
"file_path": "text-generation-inference/docs/README.md",
"repo_id": "text-generation-inference",
"token_count": 107
} |
# Using TGI with Intel GPUs
TGI optimized models are supported on Intel Data Center GPU [Max1100](https://www.intel.com/content/www/us/en/products/sku/232876/intel-data-center-gpu-max-1100/specifications.html) and [Max1550](https://www.intel.com/content/www/us/en/products/sku/232873/intel-data-center-gpu-max-1550/specifications.html); the recommended way to run TGI on these GPUs is through Docker.
On a server powered by Intel GPUs, TGI can be launched with the following command:
```bash
model=teknium/OpenHermes-2.5-Mistral-7B
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run --rm --privileged --cap-add=sys_nice \
--device=/dev/dri \
--ipc=host --shm-size 1g --net host -v $volume:/data \
ghcr.io/huggingface/text-generation-inference:3.1.0-intel-xpu \
--model-id $model --cuda-graphs 0
```
# Using TGI with Intel CPUs
Intel® Extension for PyTorch (IPEX) also provides further optimizations for Intel CPUs. IPEX provides optimized operations such as flash attention, paged attention, Add + LayerNorm, ROPE, and more.
On a server powered by Intel CPUs, TGI can be launched with the following command:
```bash
model=teknium/OpenHermes-2.5-Mistral-7B
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run --rm --privileged --cap-add=sys_nice \
--device=/dev/dri \
--ipc=host --shm-size 1g --net host -v $volume:/data \
ghcr.io/huggingface/text-generation-inference:3.1.0-intel-cpu \
--model-id $model --cuda-graphs 0
```
The launched TGI server can then be queried from clients; a minimal example is shown below, and the [Consuming TGI](./basic_tutorials/consuming_tgi) guide covers the available clients in more detail.
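For a quick smoke test, you can send a single generation request with `curl`. This is a minimal sketch that assumes the server is reachable on `127.0.0.1` at the container's default port 80 (with `--net host` as in the commands above); adjust the address and port if your setup differs:
```bash
# Send one generation request to the local TGI server.
# The address and port here are assumptions based on the launch commands above.
curl 127.0.0.1:80/generate \
    -X POST \
    -H 'Content-Type: application/json' \
    -d '{"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}}'
```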
| text-generation-inference/docs/source/installation_intel.md/0 | {
"file_path": "text-generation-inference/docs/source/installation_intel.md",
"repo_id": "text-generation-inference",
"token_count": 562
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.5703125,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14746094,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9277344,
"text": " un"
},
{
"id": 46341,
"logprob": -15.421875,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5820312,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4013672,
"text": ","
},
{
"id": 1669,
"logprob": -1.5595703,
"text": " il"
},
{
"id": 11580,
"logprob": -0.9428711,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.703125,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.7763672,
"text": " d'abord"
}
],
"seed": 0,
"tokens": [
{
"id": 578,
"logprob": -1.7822266,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.4882812,
"special": false,
"text": " faire"
},
{
"id": 7735,
"logprob": -2.4199219,
"special": false,
"text": " fond"
},
{
"id": 289,
"logprob": 0.0,
"special": false,
"text": "re"
},
{
"id": 693,
"logprob": -2.4628906,
"special": false,
"text": " à"
},
{
"id": 366,
"logprob": -1.1308594,
"special": false,
"text": " la"
},
{
"id": 48844,
"logprob": -1.7900391,
"special": false,
"text": " cass"
},
{
"id": 1744,
"logprob": 0.0,
"special": false,
"text": "ero"
},
{
"id": 327,
"logprob": 0.0,
"special": false,
"text": "le"
},
{
"id": 2940,
"logprob": -1.9306641,
"special": false,
"text": " avec"
}
],
"top_tokens": null
},
"generated_text": " le faire fondre à la casserole avec"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json",
"repo_id": "text-generation-inference",
"token_count": 1548
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 18682,
"logprob": -0.8769531,
"special": false,
"text": " Deep"
},
{
"id": 6975,
"logprob": -0.0076942444,
"special": false,
"text": " learning"
},
{
"id": 374,
"logprob": -0.25073242,
"special": false,
"text": " is"
},
{
"id": 264,
"logprob": -0.097595215,
"special": false,
"text": " a"
},
{
"id": 955,
"logprob": -0.921875,
"special": false,
"text": " type"
},
{
"id": 315,
"logprob": -0.00027918816,
"special": false,
"text": " of"
},
{
"id": 21075,
"logprob": -0.5527344,
"special": false,
"text": " artificial"
},
{
"id": 11478,
"logprob": -0.042541504,
"special": false,
"text": " intelligence"
},
{
"id": 320,
"logprob": -0.38891602,
"special": false,
"text": " ("
},
{
"id": 15836,
"logprob": -0.0011043549,
"special": false,
"text": "AI"
}
],
"top_tokens": null
},
"generated_text": " Deep learning is a type of artificial intelligence (AI"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8an_fp/test_compressed_tensors_w8an.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8an_fp/test_compressed_tensors_w8an.json",
"repo_id": "text-generation-inference",
"token_count": 869
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.484375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83203125,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1484375,
"special": false,
"text": " is"
},
{
"id": 245,
"logprob": -1.578125,
"special": false,
"text": " a"
},
{
"id": 3412,
"logprob": -2.578125,
"special": false,
"text": " document"
},
{
"id": 344,
"logprob": -1.125,
"special": false,
"text": " that"
},
{
"id": 317,
"logprob": -1.6953125,
"special": false,
"text": " is"
},
{
"id": 1222,
"logprob": -1.71875,
"special": false,
"text": " used"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is a document that is used"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2.json",
"repo_id": "text-generation-inference",
"token_count": 858
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -2.0566406,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.5253906,
"special": false,
"text": "\n"
},
{
"id": 29902,
"logprob": -2.7578125,
"special": false,
"text": "I"
},
{
"id": 4966,
"logprob": -1.9033203,
"special": false,
"text": " hope"
},
{
"id": 445,
"logprob": -0.5019531,
"special": false,
"text": " this"
},
{
"id": 6911,
"logprob": -0.21264648,
"special": false,
"text": " helps"
},
{
"id": 29991,
"logprob": -0.5991211,
"special": false,
"text": "!"
},
{
"id": 2803,
"logprob": -0.37475586,
"special": false,
"text": " Let"
},
{
"id": 592,
"logprob": -0.018463135,
"special": false,
"text": " me"
},
{
"id": 1073,
"logprob": -0.0008597374,
"special": false,
"text": " know"
}
],
"top_tokens": null
},
"generated_text": "\n\nI hope this helps! Let me know"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar.json",
"repo_id": "text-generation-inference",
"token_count": 866
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 28747,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -0.1307373,
"special": false,
"text": " Let"
},
{
"id": 332,
"logprob": -2.3359375,
"special": false,
"text": " u"
},
{
"id": 347,
"logprob": 0.0,
"special": false,
"text": " be"
},
{
"id": 325,
"logprob": -1.0234375,
"special": false,
"text": " ("
},
{
"id": 28734,
"logprob": -2.0292969,
"special": false,
"text": "0"
},
{
"id": 648,
"logprob": -1.0439453,
"special": false,
"text": " +"
},
{
"id": 28705,
"logprob": -0.24499512,
"special": false,
"text": " "
},
{
"id": 28770,
"logprob": -0.5073242,
"special": false,
"text": "3"
},
{
"id": 387,
"logprob": -1.5507812,
"special": false,
"text": " -"
}
],
"top_tokens": null
},
"generated_text": "Test request: Let u be (0 + 3 -"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 856
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 8,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 2502,
"logprob": -1.7890625,
"special": false,
"text": "image"
},
{
"id": 2196,
"logprob": -0.53125,
"special": false,
"text": " result"
},
{
"id": 604,
"logprob": -0.0077209473,
"special": false,
"text": " for"
},
{
"id": 12254,
"logprob": -1.703125,
"special": false,
"text": " chicken"
},
{
"id": 611,
"logprob": -0.21582031,
"special": false,
"text": " on"
},
{
"id": 573,
"logprob": -0.734375,
"special": false,
"text": " the"
},
{
"id": 8318,
"logprob": -0.026000977,
"special": false,
"text": " beach"
},
{
"id": 1,
"logprob": -0.2109375,
"special": true,
"text": "<eos>"
}
],
"top_tokens": null
},
"generated_text": "image result for chicken on the beach"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma_two_images.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma_two_images.json",
"repo_id": "text-generation-inference",
"token_count": 719
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4911,
"logprob": -6.9765625,
"text": "User"
},
{
"id": 29901,
"logprob": -0.0059432983,
"text": ":"
},
{
"id": 32000,
"logprob": -0.8408203,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -9.906292e-05,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.3841858e-07,
"text": "<fake_token_around_image>"
},
{
"id": 1815,
"logprob": -4.1679688,
"text": "Can"
},
{
"id": 366,
"logprob": -0.014099121,
"text": "you"
},
{
"id": 2649,
"logprob": -4.4609375,
"text": "tell"
},
{
"id": 592,
"logprob": -0.29882812,
"text": "me"
},
{
"id": 263,
"logprob": -4.1445312,
"text": "a"
},
{
"id": 1407,
"logprob": -9.3828125,
"text": "very"
},
{
"id": 3273,
"logprob": -1.9736328,
"text": "short"
},
{
"id": 5828,
"logprob": -0.2800293,
"text": "story"
},
{
"id": 2729,
"logprob": -3.5625,
"text": "based"
},
{
"id": 373,
"logprob": -0.0006427765,
"text": "on"
},
{
"id": 278,
"logprob": -0.13952637,
"text": "the"
},
{
"id": 1967,
"logprob": -0.068115234,
"text": "image"
},
{
"id": 29973,
"logprob": -0.16357422,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 32002,
"logprob": -0.0026474,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 29871,
"logprob": -8.547306e-05,
"special": false,
"text": " "
},
{
"id": 13,
"logprob": -1.7881393e-05,
"special": false,
"text": "\n"
},
{
"id": 7900,
"logprob": -3.0994415e-06,
"special": false,
"text": "Ass"
},
{
"id": 22137,
"logprob": 0.0,
"special": false,
"text": "istant"
},
{
"id": 29901,
"logprob": -3.2186508e-06,
"special": false,
"text": ":"
},
{
"id": 319,
"logprob": -0.92529297,
"special": false,
"text": " A"
},
{
"id": 696,
"logprob": -1.1269531,
"special": false,
"text": " ro"
},
{
"id": 15664,
"logprob": -0.00029492378,
"special": false,
"text": "oster"
},
{
"id": 15028,
"logprob": -1.1855469,
"special": false,
"text": " stands"
}
]
},
"generated_text": " \nAssistant: A rooster stands"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json",
"repo_id": "text-generation-inference",
"token_count": 2062
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2502,
"logprob": null,
"text": " red"
},
{
"id": 13,
"logprob": -2.734375,
"text": ","
},
{
"id": 8862,
"logprob": -3.6875,
"text": " yellow"
},
{
"id": 13,
"logprob": -0.40234375,
"text": ","
},
{
"id": 209,
"logprob": -8.25,
"text": " "
}
],
"seed": 0,
"tokens": [
{
"id": 187,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 395,
"logprob": -0.3125,
"special": false,
"text": "and"
},
{
"id": 4797,
"logprob": 0.0,
"special": false,
"text": " blue"
},
{
"id": 9830,
"logprob": -2.25,
"special": false,
"text": " colors"
},
{
"id": 15,
"logprob": 0.0,
"special": false,
"text": "."
},
{
"id": 329,
"logprob": -2.171875,
"special": false,
"text": " A"
},
{
"id": 1180,
"logprob": -2.046875,
"special": false,
"text": " number"
},
{
"id": 273,
"logprob": 0.0,
"special": false,
"text": " of"
},
{
"id": 1027,
"logprob": -1.5546875,
"special": false,
"text": " different"
},
{
"id": 3295,
"logprob": -0.97265625,
"special": false,
"text": " color"
}
],
"top_tokens": null
},
"generated_text": "blue, red, yellow, \nand blue colors. A number of different color"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 1157
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 8,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.118652344,
"special": false,
"text": " A"
},
{
"id": 11426,
"logprob": -0.28320312,
"special": false,
"text": " bee"
},
{
"id": 335,
"logprob": -0.95703125,
"special": false,
"text": " on"
},
{
"id": 253,
"logprob": -0.06982422,
"special": false,
"text": " a"
},
{
"id": 11986,
"logprob": -0.49414062,
"special": false,
"text": " pink"
},
{
"id": 8525,
"logprob": -0.07763672,
"special": false,
"text": " flower"
},
{
"id": 30,
"logprob": -1.0703125,
"special": false,
"text": "."
},
{
"id": 49154,
"logprob": -0.092285156,
"special": true,
"text": "<end_of_utterance>"
}
],
"top_tokens": null
},
"generated_text": " A bee on a pink flower."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_smolvlm/test_flash_smolvlm_next_simple_url.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_smolvlm/test_flash_smolvlm_next_simple_url.json",
"repo_id": "text-generation-inference",
"token_count": 722
} |
import pytest
import requests
import json
from aiohttp import ClientSession
from text_generation.types import Completion, ChatCompletionChunk
@pytest.fixture(scope="module")
def flash_llama_completion_handle(launcher):
with launcher(
"meta-llama/Meta-Llama-3.1-8B-Instruct",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_completion(flash_llama_completion_handle):
await flash_llama_completion_handle.health(300)
return flash_llama_completion_handle.client
# NOTE: since `v1/completions` is a deprecated interface/endpoint we do not provide a convenience
# method for it. Instead, we use the `requests` library to make the HTTP request directly.
@pytest.mark.release
def test_flash_llama_completion_single_prompt(
flash_llama_completion, response_snapshot
):
response = requests.post(
f"{flash_llama_completion.base_url}/v1/completions",
json={
"model": "tgi",
"prompt": "What is Deep Learning?",
"max_tokens": 10,
"temperature": 0.0,
},
headers=flash_llama_completion.headers,
stream=False,
)
response = response.json()
assert len(response["choices"]) == 1
assert (
response["choices"][0]["text"]
== " A Beginner’s Guide\nDeep learning is a subset"
)
assert response == response_snapshot
@pytest.mark.release
async def test_flash_llama_completion_stream_usage(
flash_llama_completion, response_snapshot
):
url = f"{flash_llama_completion.base_url}/v1/chat/completions"
request = {
"model": "tgi",
"messages": [
{
"role": "user",
"content": "What is Deep Learning?",
}
],
"max_tokens": 10,
"temperature": 0.0,
"stream_options": {"include_usage": True},
"stream": True,
}
string = ""
chunks = []
had_usage = False
async with ClientSession(headers=flash_llama_completion.headers) as session:
async with session.post(url, json=request) as response:
# iterate over the stream
async for chunk in response.content.iter_any():
# remove "data:"
chunk = chunk.decode().split("\n\n")
# remove "data:" if present
chunk = [c.replace("data:", "") for c in chunk]
# remove empty strings
chunk = [c for c in chunk if c]
# remove completion marking chunk
chunk = [c for c in chunk if c != " [DONE]"]
# parse json
chunk = [json.loads(c) for c in chunk]
for c in chunk:
chunks.append(ChatCompletionChunk(**c))
assert "choices" in c
if len(c["choices"]) == 1:
index = c["choices"][0]["index"]
assert index == 0
string += c["choices"][0]["delta"]["content"]
has_usage = c["usage"] is not None
assert not had_usage
if has_usage:
had_usage = True
else:
raise RuntimeError("Expected different payload")
assert had_usage
assert (
string
== "**Deep Learning: An Overview**\n=====================================\n\n"
)
assert chunks == response_snapshot
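    # Re-run the same request without `stream_options` to check that no usage
    # chunk is emitted in the stream.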
request = {
"model": "tgi",
"messages": [
{
"role": "user",
"content": "What is Deep Learning?",
}
],
"max_tokens": 10,
"temperature": 0.0,
"stream": True,
}
string = ""
chunks = []
had_usage = False
async with ClientSession(headers=flash_llama_completion.headers) as session:
async with session.post(url, json=request) as response:
# iterate over the stream
async for chunk in response.content.iter_any():
# remove "data:"
chunk = chunk.decode().split("\n\n")
# remove "data:" if present
chunk = [c.replace("data:", "") for c in chunk]
# remove empty strings
chunk = [c for c in chunk if c]
# remove completion marking chunk
chunk = [c for c in chunk if c != " [DONE]"]
# parse json
chunk = [json.loads(c) for c in chunk]
for c in chunk:
chunks.append(ChatCompletionChunk(**c))
assert "choices" in c
if len(c["choices"]) == 1:
index = c["choices"][0]["index"]
assert index == 0
string += c["choices"][0]["delta"]["content"]
has_usage = c["usage"] is not None
assert not had_usage
if has_usage:
had_usage = True
else:
raise RuntimeError("Expected different payload")
assert not had_usage
assert (
string
== "**Deep Learning: An Overview**\n=====================================\n\n"
)
@pytest.mark.release
def test_flash_llama_completion_many_prompts(flash_llama_completion, response_snapshot):
response = requests.post(
f"{flash_llama_completion.base_url}/v1/completions",
json={
"model": "tgi",
"prompt": [
"What is Deep Learning?",
"Is water wet?",
"What is the capital of France?",
"def mai",
],
"max_tokens": 10,
"seed": 0,
"temperature": 0.0,
},
headers=flash_llama_completion.headers,
stream=False,
)
response = response.json()
assert len(response["choices"]) == 4
all_indexes = [(choice["index"], choice["text"]) for choice in response["choices"]]
all_indexes.sort()
all_indices, all_strings = zip(*all_indexes)
assert list(all_indices) == [0, 1, 2, 3]
assert list(all_strings) == [
" A Beginner’s Guide\nDeep learning is a subset",
" This is a question that has puzzled many people for",
" Paris\nWhat is the capital of France?\nThe",
'usculas_minusculas(s):\n """\n',
]
assert response == response_snapshot
@pytest.mark.release
async def test_flash_llama_completion_many_prompts_stream(
flash_llama_completion, response_snapshot
):
request = {
"model": "tgi",
"prompt": [
"What is Deep Learning?",
"Is water wet?",
"What is the capital of France?",
"def mai",
],
"max_tokens": 10,
"seed": 0,
"temperature": 0.0,
"stream": True,
}
url = f"{flash_llama_completion.base_url}/v1/completions"
chunks = []
strings = [""] * 4
async with ClientSession(headers=flash_llama_completion.headers) as session:
async with session.post(url, json=request) as response:
# iterate over the stream
async for chunk in response.content.iter_any():
# remove "data:"
chunk = chunk.decode().split("\n\n")
# remove "data:" if present
chunk = [c.replace("data:", "") for c in chunk]
# remove empty strings
chunk = [c for c in chunk if c]
# remove completion marking chunk
chunk = [c for c in chunk if c != " [DONE]"]
# parse json
chunk = [json.loads(c) for c in chunk]
for c in chunk:
chunks.append(Completion(**c))
assert "choices" in c
index = c["choices"][0]["index"]
assert 0 <= index <= 4
strings[index] += c["choices"][0]["text"]
assert response.status == 200
assert list(strings) == [
" A Beginner’s Guide\nDeep learning is a subset",
" This is a question that has puzzled many people for",
" Paris\nWhat is the capital of France?\nThe",
'usculas_minusculas(s):\n """\n',
]
assert chunks == response_snapshot
| text-generation-inference/integration-tests/models/test_completion_prompts.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_completion_prompts.py",
"repo_id": "text-generation-inference",
"token_count": 4135
} |
import pytest
@pytest.fixture(scope="module")
def flash_llama_handle(launcher):
with launcher("huggingface/llama-7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama(flash_llama_handle):
await flash_llama_handle.health(300)
return flash_llama_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_simple(flash_llama, response_snapshot):
response = await flash_llama.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_all_params(flash_llama, response_snapshot):
response = await flash_llama.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 5
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_load(flash_llama, generate_load, response_snapshot):
responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama.py",
"repo_id": "text-generation-inference",
"token_count": 657
} |
import pytest
@pytest.fixture(scope="module")
def flash_pali_gemma_handle(launcher):
with launcher(
"google/paligemma-3b-pt-224",
num_shard=1,
revision="float16",
max_input_length=4000,
max_total_tokens=4096,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_pali_gemma(flash_pali_gemma_handle):
await flash_pali_gemma_handle.health(300)
return flash_pali_gemma_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_pali_gemma(flash_pali_gemma, response_snapshot, cow_beach):
inputs = f"Where is the cow standing?\n"
response = await flash_pali_gemma.generate(inputs, max_new_tokens=20)
assert response.generated_text == "beach"
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_pali_gemma_two_images(
flash_pali_gemma, response_snapshot, chicken, cow_beach
):
response = await flash_pali_gemma.generate(
f"caption\n",
max_new_tokens=20,
)
# Is PaliGemma not able to handle two separate images? At least we
# get output showing that both images are used.
assert (
response.generated_text == "image result for chicken on the beach"
), f"{repr(response.generated_text)}"
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_pali_gemma.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_pali_gemma.py",
"repo_id": "text-generation-inference",
"token_count": 587
} |
import pytest
@pytest.fixture(scope="module")
def flash_llava_next_handle(launcher):
with launcher(
"llava-hf/llava-v1.6-mistral-7b-hf",
num_shard=4,
max_input_length=4000,
max_total_tokens=4096,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llava_next(flash_llava_next_handle):
await flash_llava_next_handle.health(300)
return flash_llava_next_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llava_next_simple(flash_llava_next, response_snapshot, chicken):
response = await flash_llava_next.generate(
f"User:Can you tell me a very short story based on the image?",
max_new_tokens=10,
)
assert (
response.generated_text == "\n\nOnce upon a time, there was a"
), f"{repr(response.generated_text)}"
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llava_next_all_params(flash_llava_next, response_snapshot):
response = await flash_llava_next.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 6
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llava_next_load(
flash_llava_next, generate_load, response_snapshot, chicken
):
responses = await generate_load(
flash_llava_next,
f"User:Can you tell me a very short story based on the image?",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert generated_texts[0] == "\n\nOnce upon a time, there was a"
assert len(generated_texts) == 4
assert all([r.generated_text == generated_texts[0] for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_llava_next.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_llava_next.py",
"repo_id": "text-generation-inference",
"token_count": 961
} |
[package]
name = "text-generation-launcher"
description = "Text Generation Launcher"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[dependencies]
clap = { version = "4.4.5", features = ["derive", "env"] }
ctrlc = { version = "3.4.1", features = ["termination"] }
hf-hub = "0.3.2"
nix = { version = "0.28.0", features = ["signal"] }
once_cell = "1.19.0"
pyo3 = { workspace = true }
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.107"
thiserror = "1.0.59"
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }
regex = "1.11.0"
[dev-dependencies]
float_eq = "1.0.1"
reqwest = { version = "0.11.20", features = ["blocking", "json"] }
[build-dependencies]
vergen = { version = "8.2.5", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
| text-generation-inference/launcher/Cargo.toml/0 | {
"file_path": "text-generation-inference/launcher/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 343
} |
{ pkgs, nix-filter }:
let
filter = nix-filter.lib;
in
with pkgs;
defaultCrateOverrides
// {
aws-lc-rs = attrs: {
# aws-lc-rs does its own custom parsing of Cargo environment
# variables like DEP_.*_INCLUDE. However buildRustCrate does
# not use the version number, so the parsing fails.
postPatch = ''
substituteInPlace build.rs \
--replace-fail \
"assert!(!selected.is_empty()" \
"// assert!(!selected.is_empty()"
'';
};
rav1e = attrs: { env.CARGO_ENCODED_RUSTFLAGS = "-C target-feature=-crt-static"; };
grpc-metadata = attrs: {
src = filter {
root = ../backends/grpc-metadata;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
pyo3-build-config = attrs: {
buildInputs = [ python3 ];
};
text-generation-benchmark = attrs: {
src = filter {
root = ../benchmark;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
text-generation-client = attrs: {
src = filter {
root = ../.;
include = with filter; [
isDirectory
(and (inDirectory "backends/client") (matchExt "rs"))
(and (inDirectory "proto") (matchExt "proto"))
];
};
postPatch = "cd backends/client";
buildInputs = [ protobuf ];
};
text-generation-launcher = attrs: {
src = filter {
root = ../launcher;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
text-generation-router = attrs: {
src = filter {
root = ../router;
include = with filter; [
isDirectory
(matchExt "rs")
];
};
};
text-generation-router-v3 = attrs: {
# We need to do the src/source root dance so that the build
# has access to the protobuf file.
src = filter {
root = ../.;
include = with filter; [
isDirectory
(and (inDirectory "backends/v3") (matchExt "rs"))
(and (inDirectory "proto") (matchExt "proto"))
];
};
postPatch = "cd backends/v3";
buildInputs = [ protobuf ];
};
}
| text-generation-inference/nix/crate-overrides.nix/0 | {
"file_path": "text-generation-inference/nix/crate-overrides.nix",
"repo_id": "text-generation-inference",
"token_count": 937
} |
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::Resource;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{filter::LevelFilter, EnvFilter, Layer};
/// Init logging using env variables LOG_LEVEL and LOG_FORMAT:
/// - otlp_endpoint is an optional URL to an Open Telemetry collector
/// - otlp_service_name is the service name to appear in APM
/// - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (defaults to INFO)
/// - LOG_FORMAT may be TEXT or JSON (defaults to TEXT)
/// - LOG_COLORIZE may be "false" or "true" (defaults to "true" on ANSI-supported platforms)
pub fn init_logging(otlp_endpoint: Option<String>, otlp_service_name: String, json_output: bool) {
let mut layers = Vec::new();
// STDOUT/STDERR layer
let ansi = std::env::var("LOG_COLORIZE") != Ok("1".to_string());
let fmt_layer = tracing_subscriber::fmt::layer()
.with_file(true)
.with_ansi(ansi)
.with_line_number(true);
let fmt_layer = match json_output {
true => fmt_layer.json().flatten_event(true).boxed(),
false => fmt_layer.boxed(),
};
layers.push(fmt_layer);
// OpenTelemetry tracing layer
if let Some(otlp_endpoint) = otlp_endpoint {
global::set_text_map_propagator(TraceContextPropagator::new());
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(otlp_endpoint),
)
.with_trace_config(
trace::config()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
otlp_service_name,
)]))
.with_sampler(Sampler::AlwaysOn),
)
.install_batch(opentelemetry::runtime::Tokio);
if let Ok(tracer) = tracer {
layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
init_tracing_opentelemetry::init_propagator().unwrap();
};
}
// Filter events with LOG_LEVEL
let varname = "LOG_LEVEL";
let env_filter = if let Ok(log_level) = std::env::var(varname) {
        // Override to avoid simple logs being spammed with tokio-level information
let log_level = match &log_level[..] {
"warn" => "text_generation_launcher=warn,text_generation_router=warn",
"info" => "text_generation_launcher=info,text_generation_router=info",
"debug" => "text_generation_launcher=debug,text_generation_router=debug",
log_level => log_level,
};
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.parse_lossy(log_level)
} else {
EnvFilter::new("info")
};
tracing_subscriber::registry()
.with(env_filter)
.with(layers)
.init();
}
| text-generation-inference/router/src/logging.rs/0 | {
"file_path": "text-generation-inference/router/src/logging.rs",
"repo_id": "text-generation-inference",
"token_count": 1445
} |
lorax_punica_commit := c71861a653412267dc27ec86013dd945ce3474bc
build-lorax-punica:
if [ ! -d 'lorax-punica' ]; then \
git clone --no-checkout https://github.com/predibase/lorax.git lorax-punica; \
fi
cd lorax-punica && git sparse-checkout set server/punica_kernels && git checkout $(lorax_punica_commit)
cd lorax-punica && git submodule update --init --recursive
cd lorax-punica/server/punica_kernels && python setup.py build
install-lorax-punica: build-lorax-punica
cd lorax-punica/server/punica_kernels && python setup.py install
| text-generation-inference/server/Makefile-lorax-punica/0 | {
"file_path": "text-generation-inference/server/Makefile-lorax-punica",
"repo_id": "text-generation-inference",
"token_count": 208
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _q4_matrix_cuh
#define _q4_matrix_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
class Q4Matrix
{
public:
int device;
int height;
int width;
int groups;
int groupsize;
uint32_t* cuda_qweight = NULL;
uint32_t* cuda_qzeros = NULL;
half* cuda_scales = NULL;
uint32_t* cuda_x_map = NULL;
Q4Matrix
(
const int _height,
const int _width,
const int _groups,
uint32_t* _qweight,
uint32_t* _qzeros,
half* _scales,
uint32_t* _g_idx,
const int _device
);
~Q4Matrix();
void reconstruct(half* out);
private:
void make_sequential(const uint32_t* cpu_g_idx);
};
void g_q4_keep_matrix(Q4Matrix* m);
void g_q4_free_matrices();
#endif
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cuh",
"repo_id": "text-generation-inference",
"token_count": 420
} |
#ifndef _q_matrix_cuh
#define _q_matrix_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#define MAX_SUPERGROUPS 16
class QMatrix
{
public:
int device;
bool is_gptq;
int height;
int width;
int groups;
int gptq_groupsize;
int rows_8;
int rows_6;
int rows_5;
int rows_4;
int rows_3;
int rows_2;
uint32_t* cuda_q_weight = NULL;
uint16_t* cuda_q_perm = NULL;
uint16_t* cuda_q_invperm = NULL;
uint32_t* cuda_q_scale = NULL;
half* cuda_q_scale_max = NULL;
uint16_t* cuda_q_groups = NULL;
uint16_t* cuda_q_group_map = NULL;
uint32_t* cuda_gptq_qzeros = NULL;
half* cuda_gptq_scales = NULL;
half* temp_dq;
bool failed;
QMatrix
(
const int _device,
const int _height,
const int _width,
const int _groups,
uint32_t* _q_weight,
uint16_t* _q_perm,
uint16_t* _q_invperm,
uint32_t* _q_scale,
half* _q_scale_max,
uint16_t* _q_groups,
uint16_t* _q_group_map,
uint32_t* _gptq_qzeros,
half* _gptq_scales,
uint32_t* _gptq_g_idx,
half* _temp_dq
);
~QMatrix();
void reconstruct(half* out);
bool make_sequential(const uint32_t* cpu_g_idx);
private:
};
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cuh",
"repo_id": "text-generation-inference",
"token_count": 702
} |
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/adapters/config.py
# License: Apache License Version 2.0, January 2004
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, Set, Tuple
import torch
from text_generation_server.adapters.weights import AdapterWeights
@dataclass
class ModuleMap:
module_name: str
module_weights: Dict[str, Tuple[torch.Tensor, str]]
@dataclass
class AdapterConfig(ABC):
base_model_name_or_path: str
@abstractmethod
def map_weights_for_model(
self,
adapter_weights: Dict[int, AdapterWeights],
weight_names: Tuple[str],
) -> Tuple[ModuleMap, Set[str]]:
pass
| text-generation-inference/server/text_generation_server/adapters/config.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/adapters/config.py",
"repo_id": "text-generation-inference",
"token_count": 275
} |
from text_generation_server.layers.gptq import GPTQWeight
import torch
from exllama_kernels import make_q4, q4_matmul, prepare_buffers, set_tuning_params
# Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension
none_tensor = torch.empty((1, 1), device="meta")
def ext_make_q4(qweight, qzeros, scales, g_idx, device):
"""Construct Q4Matrix, return handle"""
return make_q4(
qweight, qzeros, scales, g_idx if g_idx is not None else none_tensor, device
)
def ext_q4_matmul(x, q4, q4_width):
"""Matrix multiplication, returns x @ q4"""
outshape = x.shape[:-1] + (q4_width,)
x = x.view(-1, x.shape[-1])
output = torch.empty((x.shape[0], q4_width), dtype=torch.float16, device=x.device)
q4_matmul(x, q4, output)
return output.view(outshape)
MAX_DQ = 1
MAX_INNER = 1
ACT_ORDER = False
DEVICE = None
TEMP_STATE = None
TEMP_DQ = None
def set_device(device):
global DEVICE
DEVICE = device
def create_exllama_buffers(max_total_tokens: int):
global MAX_DQ, MAX_INNER, ACT_ORDER, DEVICE, TEMP_STATE, TEMP_DQ
assert DEVICE is not None, "call set_device first"
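    # When act-order is not used the input X never needs reordering, so a
    # single-row temp_state scratch buffer is enough.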
if not ACT_ORDER:
max_total_tokens = 1
# This temp_state buffer is required to reorder X in the act-order case.
temp_state = torch.zeros(
(max_total_tokens, MAX_INNER), dtype=torch.float16, device=DEVICE
)
temp_dq = torch.zeros((1, MAX_DQ), dtype=torch.float16, device=DEVICE)
# This temp_dq buffer is required to dequantize weights when using cuBLAS, typically for the prefill.
prepare_buffers(DEVICE, temp_state, temp_dq)
matmul_recons_thd = 8
matmul_fused_remap = False
matmul_no_half2 = False
set_tuning_params(matmul_recons_thd, matmul_fused_remap, matmul_no_half2)
TEMP_STATE, TEMP_DQ = temp_state, temp_dq
class Ex4bitLinear(torch.nn.Module):
"""Linear layer implementation with per-group 4-bit quantization of the weights"""
def __init__(self, weight: GPTQWeight, bias):
super().__init__()
global MAX_DQ, MAX_INNER, ACT_ORDER, DEVICE
assert weight.bits == 4
self.device = weight.qweight.device
self.qweight = weight.qweight
self.qzeros = weight.qzeros
self.scales = weight.scales
self.g_idx = weight.g_idx.cpu() if weight.g_idx is not None else None
self.bias = bias if bias is not None else None
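        # A g_idx that is all zeros, or that simply enumerates the groups in
        # order, carries no act-order information and can be dropped.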
if self.g_idx is not None and (
(self.g_idx == 0).all()
or torch.equal(
weight.g_idx.cpu(),
torch.tensor(
[i // weight.groupsize for i in range(weight.g_idx.shape[0])],
dtype=torch.int32,
),
)
):
self.empty_g_idx = True
self.g_idx = None
assert self.device.type == "cuda"
assert self.device.index is not None
self.q4 = ext_make_q4(
self.qweight, self.qzeros, self.scales, self.g_idx, self.device.index
)
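        # Each int32 in qweight packs eight 4-bit weights along dim 0, so the
        # logical (dequantized) height is 8x the packed row count.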
self.height = weight.qweight.shape[0] * 8
self.width = weight.qweight.shape[1]
# Infer groupsize from height of qzeros
self.groupsize = None
if self.qzeros.shape[0] > 1:
self.groupsize = (self.qweight.shape[0] * 8) // (self.qzeros.shape[0])
if self.groupsize is not None:
assert weight.groupsize == self.groupsize
# Handle act-order matrix
if self.g_idx is not None:
if self.groupsize is None:
raise ValueError("Found group index but no groupsize. What do?")
self.act_order = True
else:
self.act_order = False
DEVICE = self.qweight.device
MAX_DQ = max(MAX_DQ, self.qweight.numel() * 8)
if self.act_order:
MAX_INNER = max(MAX_INNER, self.height, self.width)
ACT_ORDER = True
def forward(self, x):
out = ext_q4_matmul(x, self.q4, self.width)
if self.bias is not None:
out.add_(self.bias)
return out
| text-generation-inference/server/text_generation_server/layers/gptq/exllama.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/exllama.py",
"repo_id": "text-generation-inference",
"token_count": 1888
} |
from typing import Optional, Protocol, runtime_checkable
import torch
import torch.nn as nn
from loguru import logger
from transformers.activations import ACT2FN
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelRowLinear,
)
from text_generation_server.layers.fp8 import HybridFP8UnquantLoader
from text_generation_server.layers.marlin import GPTQMarlinWeightsLoader
from text_generation_server.layers.moe.gptq_marlin import (
GPTQMarlinSparseMoELayer,
can_use_marlin_moe_gemm,
)
from text_generation_server.layers.moe.unquantized import UnquantizedSparseMoELayer
from text_generation_server.layers.moe.fp8 import FP8SparseMoELayer
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.log import log_once
from text_generation_server.utils.weights import (
DefaultWeightsLoader,
Weights,
UnquantizedWeight,
)
if SYSTEM == "ipex":
from .fused_moe_ipex import fused_topk, grouped_topk
else:
from moe_kernels.fused_moe import fused_topk, grouped_topk
# NOTE: we are using a protocol here, because multiple inheritance is not nice.
# We need `Module`, and `Module` -> some abstract class -> some concrete
# class inheritance is whacky.
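# `@runtime_checkable` allows callers to validate an implementation with a
# plain isinstance(layer, MoELayer) check.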
@runtime_checkable
class MoELayer(Protocol):
def __init__(
self,
*,
n_expert_group: Optional[int],
n_experts: int,
prefix: str,
renormalize: bool,
topk: int,
topk_group: Optional[int],
weights: Weights,
gate_proj_name: str = "gate_proj",
up_proj_name: str = "up_proj",
down_proj_name: str = "down_proj",
hidden_act: str = "silu",
scoring_func: Optional[str] = None,
e_score_correction_bias: Optional[float] = None,
): ...
def forward(
self, x: torch.Tensor, *, gating_output: torch.Tensor
) -> torch.Tensor: ...
class DenseMoELayer(nn.Module):
"""
    Layer for MoE that applies *all* experts to each token and then weights
their outputs based on the calculated routing. This layer is much slower
than `SparseMoELayer` and should only be used when no fused kernels are
available (e.g. for unsupported quantizers).
"""
def __init__(
self,
*,
n_expert_group: Optional[int],
n_experts: int,
prefix: str,
renormalize: bool,
topk: int,
topk_group: Optional[int],
weights: Weights,
gate_proj_name: str = "gate_proj",
up_proj_name: str = "up_proj",
down_proj_name: str = "down_proj",
hidden_act: str = "silu",
scoring_func: Optional[str] = None,
e_score_correction_bias: Optional[float] = None,
):
super().__init__()
assert scoring_func is None, "scoring func is not handled"
assert e_score_correction_bias is None, "scoring correction bias is not handled"
log_once(
logger.info,
"No fused layers are available for this model type, using (slower) dense MoE layer",
)
assert (n_expert_group is None) == (
topk_group is None
), "n_expert_group and topk_group must both be None or have some value"
self.n_expert_group = n_expert_group
self.n_experts = n_experts
self.renormalize = renormalize
self.topk = topk
self.topk_group = topk_group
if "gelu" in hidden_act:
self.act = lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh"
if hidden_act in ["gelu_fast", "gelu_pytorch_tanh"]
else "none"
),
)
elif "silu" in hidden_act:
self.act = torch.nn.functional.silu
else:
self.act = ACT2FN[hidden_act]
self.gate_proj = [
TensorParallelColumnLinear.load(
None,
prefix=f"{prefix}.{i}.{gate_proj_name}",
weights=weights,
bias=False,
)
for i in range(self.n_experts)
]
self.up_proj = [
TensorParallelColumnLinear.load(
None,
prefix=f"{prefix}.{i}.{up_proj_name}",
weights=weights,
bias=False,
)
for i in range(self.n_experts)
]
self.down_proj = [
TensorParallelRowLinear.load(
None,
prefix=f"{prefix}.{i}.{down_proj_name}",
weights=weights,
bias=False,
)
for i in range(self.n_experts)
]
self.process_group = weights.process_group
def forward(self, x: torch.Tensor, *, gating_output: torch.Tensor) -> torch.Tensor:
"""
x: (sequence_length, model_dim)
gating_output: (sequence_length, n_experts)
"""
# optional reshape
input_shape = x.shape
x = x.view(-1, input_shape[-1])
if self.n_expert_group is not None and self.topk_group is not None:
topk_weights, topk_ids = grouped_topk(
x,
gating_output,
self.topk,
renormalize=self.renormalize,
num_expert_group=self.n_expert_group,
topk_group=self.topk_group,
)
else:
topk_weights, topk_ids = fused_topk(
x, gating_output, self.topk, self.renormalize
)
topk_weights = topk_weights.to(x.dtype)
weights = torch.zeros(
topk_ids.shape[0], self.n_experts, dtype=x.dtype, device=x.device
)
weights.scatter_(1, topk_ids.long(), topk_weights.to(weights.dtype))
out = torch.zeros_like(x)
for i in range(self.n_experts):
h = self.act(self.gate_proj[i](x)) * self.up_proj[i](x)
h = self.down_proj[i](h, reduce=False)
out += h * weights[:, i].view(-1, 1)
return out
class SparseMoELayer(nn.Module):
"""
Layer for MoE that uses fused kernels to only apply the active experts
for each token (rather than applying all experts and selecting the
outputs of active experts).
"""
def __init__(
self,
*,
n_expert_group: Optional[int],
n_experts: int,
prefix: str,
renormalize: bool,
topk: int,
topk_group: Optional[int],
weights: Weights,
scoring_func: Optional[str] = "softmax",
e_score_correction_bias: Optional[float] = None,
gate_proj_name: str = "gate_proj",
up_proj_name: str = "up_proj",
down_proj_name: str = "down_proj",
):
super().__init__()
if (
isinstance(weights.loader, DefaultWeightsLoader)
and isinstance(weights.loader.weight_class, UnquantizedWeight)
) or isinstance(weights.loader, HybridFP8UnquantLoader):
if (
isinstance(weights.loader, HybridFP8UnquantLoader)
and weights.loader.to_fp8
):
cls = FP8SparseMoELayer
else:
cls = UnquantizedSparseMoELayer
elif isinstance(
weights.loader, GPTQMarlinWeightsLoader
) and can_use_marlin_moe_gemm(
quant_method=weights.loader.quant_method,
quantize=weights.loader.quantize,
sym=weights.loader.sym,
):
cls = GPTQMarlinSparseMoELayer
else:
raise ValueError(
f"Unsupported weights loader: {type(weights.loader)}, sparse MoE is only supported for unquantized, AWQ, and GPTQ weights"
)
log_once(
logger.info,
"Using MoE layer wih fused gemm",
)
self.moe = cls(
n_expert_group=n_expert_group,
n_experts=n_experts,
prefix=prefix,
renormalize=renormalize,
topk=topk,
topk_group=topk_group,
weights=weights,
scoring_func=scoring_func,
e_score_correction_bias=e_score_correction_bias,
gate_proj_name=gate_proj_name,
up_proj_name=up_proj_name,
down_proj_name=down_proj_name,
)
def forward(self, x: torch.Tensor, *, gating_output: torch.Tensor) -> torch.Tensor:
return self.moe(x, gating_output=gating_output)
@staticmethod
def is_supported(weights: Weights) -> bool:
return (
(
isinstance(weights.loader, DefaultWeightsLoader)
and isinstance(weights.loader.weight_class, UnquantizedWeight)
)
or isinstance(weights.loader, HybridFP8UnquantLoader)
or (
isinstance(weights.loader, GPTQMarlinWeightsLoader)
and can_use_marlin_moe_gemm(
quant_method=weights.loader.quant_method,
quantize=weights.loader.quantize,
sym=weights.loader.sym,
)
)
)
| text-generation-inference/server/text_generation_server/layers/moe/__init__.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/moe/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 4545
} |
# coding=utf-8
# Copyright 2023, 2024 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Type
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from text_generation_server.layers import (
FastLinear,
SpeculativeHead,
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
get_linear,
)
from text_generation_server.layers.attention import (
Seqlen,
attention,
paged_attention,
)
from text_generation_server.layers.attention.kv_cache import KVCache, get_kv_scales
from text_generation_server.layers.layernorm import FastRMSNorm
from text_generation_server.layers.moe import DenseMoELayer, MoELayer, SparseMoELayer
from text_generation_server.layers.rotary import PositionRotaryEmbedding, get_mscale
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.weights import Weights
if SYSTEM == "rocm":
try:
import vllm._custom_ops as ops
except Exception as e:
raise ImportError(f"Could not load `vllm._custom_ops`. Full error: {e}")
class DeepseekV2Config(PretrainedConfig):
def __init__(
self,
vocab_size=102400,
hidden_size=4096,
intermediate_size=11008,
moe_intermediate_size=1407,
num_hidden_layers=30,
num_attention_heads=32,
num_key_value_heads=32,
n_shared_experts=2,
n_routed_experts=160,
ep_size=1,
routed_scaling_factor=1.0,
kv_lora_rank=512,
q_lora_rank=1536,
qk_rope_head_dim=64,
v_head_dim=128,
qk_nope_head_dim=128,
topk_method="gready",
n_group=8,
topk_group=3,
num_experts_per_tok=6,
moe_layer_freq=1,
first_k_dense_replace=0,
norm_topk_prob=False,
scoring_func="softmax",
aux_loss_alpha=0.001,
seq_aux=True,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=None,
bos_token_id=100000,
eos_token_id=100001,
pretraining_tp=1,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.moe_intermediate_size = moe_intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.n_shared_experts = n_shared_experts
self.n_routed_experts = n_routed_experts
self.ep_size = ep_size
self.routed_scaling_factor = routed_scaling_factor
self.kv_lora_rank = kv_lora_rank
self.q_lora_rank = q_lora_rank
self.qk_rope_head_dim = qk_rope_head_dim
self.v_head_dim = v_head_dim
self.qk_nope_head_dim = qk_nope_head_dim
self.topk_method = topk_method
self.n_group = n_group
self.topk_group = topk_group
self.num_experts_per_tok = num_experts_per_tok
self.moe_layer_freq = moe_layer_freq
self.first_k_dense_replace = first_k_dense_replace
self.norm_topk_prob = norm_topk_prob
self.scoring_func = scoring_func
self.aux_loss_alpha = aux_loss_alpha
self.seq_aux = seq_aux
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
if tie_word_embeddings:
raise ValueError(
"tie_word_embeddings is not supported for Deepseek V2 models."
)
if ep_size != 1:
raise ValueError(
f"Currently only ep_size == 1 is supported for Deepseek V2 models, was {ep_size}"
)
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
class DeepseekV2Attention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights: Weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.kv_lora_rank = config.kv_lora_rank
self.q_lora_rank = config.q_lora_rank
self.qk_nope_head_dim = config.qk_nope_head_dim
self.qk_rope_head_dim = config.qk_rope_head_dim
self.head_size = config.qk_nope_head_dim + config.qk_rope_head_dim
self.value_head_size = config.v_head_dim
self.head_pad_size = max(self.head_size, self.value_head_size)
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.qk_rope_head_dim,
base=config.rope_theta,
device=weights.device,
)
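        # YaRN mscale scaling applies to both the query and key side, hence
        # the squared factor in the softmax scale below.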
mscale = get_mscale(
self.rotary_emb.scaling_factor, self.rotary_emb.mscale_all_dim
)
self.softmax_scale = self.head_size**-0.5 * mscale * mscale
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
if self.q_lora_rank is None:
self.q_proj = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.q_proj",
weights=weights,
bias=config.attention_bias,
)
else:
self.q_a_proj = get_linear(
weight=weights.get_weights(f"{prefix}.q_a_proj"),
bias=(
weights.get_tensor(f"{prefix}.q_a_proj.bias")
if config.attention_bias
else None
),
)
self.q_a_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.q_a_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.q_b_proj = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.q_b_proj",
weights=weights,
bias=config.attention_bias,
)
self.kv_a_proj_with_mqa = get_linear(
weight=weights.get_weights(f"{prefix}.kv_a_proj_with_mqa"),
bias=(
weights.get_tensor(f"{prefix}.kv_a_proj_with_mqa.bias")
if config.attention_bias
else None
),
)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.kv_a_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.kv_a_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.kv_b_proj = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.kv_b_proj",
weights=weights,
bias=config.attention_bias,
)
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
cu_seqlen_prefill: torch.Tensor,
kv_cache: KVCache,
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
):
if self.q_lora_rank is None:
query = self.q_proj(hidden_states)
else:
query = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))[0])
query = query.view(-1, self.num_heads, self.head_size)
_, query_pe = torch.split(
query, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
)
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
compressed_kv, key_pe = torch.split(
compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1
)
key_pe = key_pe.view(-1, 1, self.qk_rope_head_dim)
kv = self.kv_b_proj(self.kv_a_layernorm(compressed_kv.contiguous())[0]).view(
-1, self.num_key_value_heads, self.qk_nope_head_dim + self.value_head_size
)
key_nope, value = torch.split(
kv, [self.qk_nope_head_dim, self.value_head_size], dim=-1
)
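        # Un-interleave the rotary dims of query/key: [x0, y0, x1, y1, ...]
        # -> [x0, x1, ..., y0, y1, ...], the half-split layout expected by the
        # rotary embedding.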
batch_size, heads, head_dim = query_pe.shape
query_pe = (
query_pe.view(batch_size, heads, head_dim // 2, 2)
.transpose(2, 3)
.reshape(batch_size, heads, head_dim)
)
batch_size, heads, head_dim = key_pe.shape
key_pe = (
key_pe.view(batch_size, heads, head_dim // 2, 2)
.transpose(2, 3)
.reshape(batch_size, heads, head_dim)
)
self.rotary_emb(query_pe, key_pe, cos, sin)
query[..., self.qk_nope_head_dim :] = query_pe
key = torch.empty_like(query)
key[..., : self.qk_nope_head_dim] = key_nope
key[..., self.qk_nope_head_dim :] = key_pe
# We need to pad the heads because Flash Attention does not support
# qk and v with different head sizes.
query = torch.nn.functional.pad(
query, (0, self.head_pad_size - self.head_size), value=0
)
key = torch.nn.functional.pad(
key, (0, self.head_pad_size - self.head_size), value=0
)
value = torch.nn.functional.pad(
value, (0, self.head_pad_size - self.value_head_size), value=0
)
kv_cache.store(
key=key,
value=value,
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=key,
value=value,
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
# Remove padding.
attn_output = attn_output[..., : self.value_head_size]
return self.o_proj(
attn_output.reshape(-1, self.num_heads * self.value_head_size)
)
class DeepseekV2MLP(nn.Module):
def __init__(self, prefix: str, config, weights, intermediate_size: int):
super().__init__()
self.hidden_act = config.hidden_act
if self.hidden_act != "silu":
# Bail out because MoE only supports silu.
raise NotImplementedError(
"Currently only `silu` is supported as an activation for Deepseek V2."
)
self.act = ACT2FN[self.hidden_act]
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.intermediate_size = intermediate_size // weights.process_group.size()
# TODO: This is a hotfix to be removed & properly refactored.
self.quantize = config.quantize
def forward(self, hidden_states: torch.Tensor, reduce: bool = True):
if (
SYSTEM == "rocm"
and self.hidden_act == "silu"
and hidden_states.dtype == torch.float16
and hidden_states.shape[0] == 1
and not self.quantize
):
out = torch.empty(
hidden_states.shape[0],
self.intermediate_size,
dtype=hidden_states.dtype,
device="cuda",
)
ops.LLMM_Silu(self.gate_up_proj.linear.weight, hidden_states, out, 8)
return self.down_proj(out, reduce=reduce)
else:
gate_up_states = self.gate_up_proj(hidden_states)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], reduce=reduce
)
class DeepseekV2MoE(nn.Module):
def __init__(
self,
prefix,
config: DeepseekV2Config,
moe_layer_cls: Type[MoELayer],
weights,
):
super().__init__()
self.hidden_dim = config.hidden_size
self.moe_intermediate_size = (
config.moe_intermediate_size // weights.process_group.size()
)
self.routed_scaling_factor = config.routed_scaling_factor
# Gating
self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False)
self.moe_layer = moe_layer_cls(
prefix=f"{prefix}.experts",
n_experts=config.n_routed_experts,
n_expert_group=config.n_group,
renormalize=config.norm_topk_prob,
topk=config.num_experts_per_tok,
topk_group=config.topk_group,
weights=weights,
)
assert isinstance(self.moe_layer, MoELayer)
if config.n_shared_experts is not None:
self.shared_experts = DeepseekV2MLP(
prefix=f"{prefix}.shared_experts",
config=config,
weights=weights,
intermediate_size=config.moe_intermediate_size
* config.n_shared_experts,
)
else:
self.shared_experts = None
self.process_group = weights.process_group
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.shared_experts is not None:
shared_output = self.shared_experts(x, reduce=False)
else:
shared_output = None
router_logits = self.gate(x)
out = self.moe_layer(x, gating_output=router_logits)
if shared_output is not None:
out = out + shared_output
# Reduce sum
if self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out.view(*x.shape)
class DeepseekV2Layer(nn.Module):
def __init__(self, prefix, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = DeepseekV2Attention(
prefix=f"{prefix}.self_attn",
config=config,
weights=weights,
)
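        # The first `first_k_dense_replace` layers, and any layer skipped by
        # `moe_layer_freq`, use a dense MLP instead of the MoE block.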
if (
config.n_routed_experts is not None
and layer_id >= config.first_k_dense_replace
and layer_id % config.moe_layer_freq == 0
):
moe_layer_cls = (
SparseMoELayer
if SparseMoELayer.is_supported(weights)
else DenseMoELayer
)
self.mlp = DeepseekV2MoE(f"{prefix}.mlp", config, moe_layer_cls, weights)
else:
self.mlp = DeepseekV2MLP(
prefix=f"{prefix}.mlp",
config=config,
weights=weights,
intermediate_size=config.intermediate_size,
)
self.input_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states: torch.Tensor,
residual: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
cu_seqlen_prefill: torch.Tensor,
kv_cache,
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
):
normed_hidden_states, residual = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
# faster post attention rms norm
normed_attn_res_output, residual = self.post_attention_layernorm(
attn_output, residual
)
output = self.mlp(normed_attn_res_output)
return output, residual
class DeepseekV2Model(torch.nn.Module):
def __init__(self, prefix: str, config, weights: Weights):
super().__init__()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
DeepseekV2Layer(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps
)
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashDeepseekV2ForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights: Weights):
super().__init__()
self.model = DeepseekV2Model(
"model" if not prefix else f"{prefix}.model", config, weights
)
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head" if not prefix else f"{prefix}.lm_head",
weights=weights,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_deepseek_v2_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 11480
} |
# coding=utf-8
# Copyright 2024 Starcoder2 AI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelMultiAdapterLinear,
TensorParallelAdapterRowLinear,
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.layernorm import (
FastLayerNorm,
FastRMSNorm,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
from text_generation_server.utils.weights import UnquantizedWeight
class Starcoder2Config(PretrainedConfig):
model_type = "starcoder2"
def __init__(
self,
vocab_size=49152,
hidden_size=3072,
intermediate_size=12288,
num_hidden_layers=30,
num_attention_heads=24,
num_key_value_heads=2,
mlp_type="default",
hidden_act="gelu_pytorch_tanh",
max_position_embeddings=4096,
initializer_range=0.018042,
norm_type="layer_norm",
norm_epsilon=1e-5,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
rope_theta=10000.0,
sliding_window=None,
attention_dropout=0.0,
residual_dropout=0.0,
embedding_dropout=0.0,
use_bias: bool = True,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
self.use_bias = use_bias
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.mlp_type = mlp_type
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.norm_type = norm_type
self.norm_epsilon = norm_epsilon
self.use_cache = use_cache
self.rope_theta = rope_theta
self.attention_dropout = attention_dropout
self.residual_dropout = residual_dropout
self.embedding_dropout = embedding_dropout
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
def load_attention(config, prefix, weights, layer_id):
prefixes = [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
head_size = config.hidden_size // config.num_attention_heads
sizes = [
head_size * config.num_attention_heads,
head_size * config.num_key_value_heads,
head_size * config.num_key_value_heads,
]
if config.num_attention_heads != config.num_key_value_heads:
base_layer = _load_gqa(config, prefix, weights)
else:
base_layer = TensorParallelColumnLinear.load_multi(
config,
prefixes=prefixes,
dim=0,
weights=weights,
bias=config.use_bias,
)
return TensorParallelMultiAdapterLinear.load(
base_layer=base_layer,
layer_id=layer_id,
layer_names=prefixes,
sizes=sizes,
process_group=weights.process_group,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
if config.use_bias:
w = [
weights.get_sharded(f"{p}.bias", dim=0)
for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
]
bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device)
else:
bias = None
return TensorParallelColumnLinear(get_linear(weight, bias=bias))
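# Illustrative sketch (added commentary, not part of the original file): with the
# Starcoder2Config defaults above (hidden_size=3072, num_attention_heads=24, hence
# head_size=128, and num_key_value_heads=2) and a hypothetical tensor-parallel world
# size of 2, each shard holds num_heads=12 and num_key_value_heads=1, so the fused
# QKV weight checked in _load_gqa has shape
#   [(12 + 2 * 1) * 128, 3072] == [1792, 3072]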
class Starcoder2Attention(torch.nn.Module):
def __init__(
self,
index: int,
prefix: str,
config,
weights,
):
super().__init__()
self.max_past = (
config.sliding_window if config.sliding_window is not None else -1
)
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights, index)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=getattr(config, "use_bias", False),
)
self.o_proj = TensorParallelAdapterRowLinear.load(
o_proj,
index,
"o_proj",
process_group=weights.process_group,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
):
qkv = self.query_key_value(hidden_states, adapter_data)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
if prefill_cache_indices is not None:
kv_to_cache = kv[prefill_cache_indices]
else:
kv_to_cache = kv
kv_cache.store(
key=kv_to_cache[:, 0],
value=kv_to_cache[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv_to_cache[:, 0],
value=kv_to_cache[:, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
window_size_left=self.max_past,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.o_proj(
attn_output.view(-1, self.num_heads * self.head_size), adapter_data
)
class Starcoder2MLP(nn.Module):
def __init__(self, prefix, config, weights, index):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Up (c_fc) and down (c_proj) projections; the default Starcoder2 MLP has no gating
c_fc = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.c_fc",
weights=weights,
bias=config.use_bias,
)
c_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.c_proj",
weights=weights,
bias=config.use_bias,
)
self.c_fc = TensorParallelMultiAdapterLinear.load(
c_fc,
layer_id=index,
layer_names=[f"{prefix}.c_fc"],
sizes=[config.intermediate_size, config.intermediate_size],
process_group=weights.process_group,
)
self.c_proj = TensorParallelAdapterRowLinear.load(
c_proj,
index,
"c_proj",
process_group=weights.process_group,
)
def forward(self, hidden_states, adapter_data):
hidden_states = self.c_fc(hidden_states, adapter_data)
hidden_states = self.act(hidden_states)
return self.c_proj(hidden_states, adapter_data)
class Starcoder2GatedMLP(nn.Module):
def __init__(self, index, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
prefixes = [f"{prefix}.gate_proj", f"{prefix}.up_proj"]
sizes = [
config.intermediate_size,
config.intermediate_size,
]
gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=prefixes,
weights=weights,
dim=0,
bias=config.use_bias,
)
self.gate_up_proj = TensorParallelMultiAdapterLinear.load(
gate_up_proj,
index,
layer_names=prefixes,
sizes=sizes,
process_group=weights.process_group,
)
down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=config.use_bias,
)
self.down_proj = TensorParallelAdapterRowLinear.load(
down_proj,
index,
"down_proj",
process_group=weights.process_group,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states, adapter_data):
gate_up_states = self.gate_up_proj(hidden_states, adapter_data)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data
)
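# Illustrative sketch (added commentary): with the default intermediate_size=12288 and
# a hypothetical tensor-parallel world size of 2, each shard sees intermediate_size=6144,
# so gate_up_states is viewed as (-1, 2, 6144) and gate_up_states[:, 0] / [:, 1] hold the
# gate and up projections that feed the activation and the down projection.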
STARCODER2_NORMALIZATION_CLASSES = {
"layer_norm": FastLayerNorm,
"rms_norm": FastRMSNorm,
}
STARCODER2_MLP_CLASSES = {
"default": Starcoder2MLP,
"gated": Starcoder2GatedMLP,
}
class Starcoder2Layer(nn.Module):
def __init__(self, layer_id, config, weights):
super().__init__()
prefix = f"model.layers.{layer_id}"
self.self_attn = Starcoder2Attention(
prefix=f"{prefix}.self_attn", config=config, weights=weights, index=layer_id
)
self.mlp = STARCODER2_MLP_CLASSES[config.mlp_type](
prefix=f"{prefix}.mlp", config=config, weights=weights, index=layer_id
)
self.input_layernorm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.norm_epsilon
)
self.post_attention_layernorm = STARCODER2_NORMALIZATION_CLASSES[
config.norm_type
].load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.norm_epsilon,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
)
# faster post attention rms norm
normed_attn_res_output, attn_res = self.post_attention_layernorm(
attn_output, res
)
mlp_output = self.mlp(normed_attn_res_output, adapter_data)
return mlp_output, attn_res
class Starcoder2Model(torch.nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
Starcoder2Layer(
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load(
prefix=f"{prefix}.norm", weights=weights, eps=config.norm_epsilon
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
true_max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
adapter_data,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid indexing in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, true_max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
adapter_data,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashStarcoder2ForCausalLM(torch.nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.model = Starcoder2Model(prefix, config, weights)
try:
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head",
weights=weights,
)
except RuntimeError:
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix}.embed_tokens",
weights=weights,
)
self.max_past = config.sliding_window
self.max_past_tensor = (
torch.tensor(config.sliding_window, device=weights.device)
if self.max_past is not None
else None
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
true_max_s = max_s
if prefill_cache_indices is not None:
# Slots also need to be sliced as they have the same size as the whole kv tensor
slots = slots[prefill_cache_indices]
elif self.max_past is not None:
# Clamp in decode mode as paged attention requires clamped values whereas the flash attention
# kernel requires the true values
seqlen = seqlen.clamp(max=self.max_past_tensor)
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
true_max_s,
prefill_cache_indices,
adapter_data,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 10078
} |
# coding=utf-8
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Qwen2 VL model."""
from typing import Optional, Tuple, List
import torch
import torch.utils.checkpoint
from torch import nn
from text_generation_server.utils.import_utils import SYSTEM
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
else:
import flash_attn_2_cuda
import numpy as np
from transformers.activations import ACT2FN
import torch.nn.functional as F
from text_generation_server.layers.layernorm import FastLayerNorm, FastRMSNorm
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelRowLinear,
TensorParallelEmbedding,
SpeculativeHead,
)
from text_generation_server.layers.attention import (
Seqlen,
)
from text_generation_server.models.custom_modeling.flash_qwen2_modeling import (
Qwen2Model,
)
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
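# Illustrative sketch (added commentary, not part of the original file): for a last
# dimension of size 4, rotate_half maps [a, b, c, d] -> [-c, -d, a, b], e.g.
# >>> rotate_half(torch.tensor([1.0, 2.0, 3.0, 4.0]))
# tensor([-3., -4.,  1.,  2.])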
def apply_rotary_pos_emb_vision(
tensor: torch.Tensor, freqs: torch.Tensor
) -> torch.Tensor:
orig_dtype = tensor.dtype
tensor = tensor.float()
cos = freqs.cos()
sin = freqs.sin()
cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
output = (tensor * cos) + (rotate_half(tensor) * sin)
output = output.to(orig_dtype)
return output
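# Shape sketch (added commentary, not part of the original file): `freqs` is expected
# as (seq_len, head_dim // 2); cos/sin are tiled to (1, seq_len, 1, head_dim) so they
# broadcast against a tensor laid out as (1, seq_len, num_heads, head_dim), e.g.
# >>> t = torch.zeros(1, 10, 16, 80)    # (batch, seq_len, num_heads, head_dim)
# >>> f = torch.zeros(10, 40)           # (seq_len, head_dim // 2)
# >>> apply_rotary_pos_emb_vision(t, f).shape
# torch.Size([1, 10, 16, 80])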
class Qwen2VLAttention(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.embed_dim = config.embed_dim // weights.process_group.size()
self.head_dim = config.hidden_size // config.num_heads
self.num_heads = config.num_heads // weights.process_group.size()
self.qkv = TensorParallelColumnLinear.load_qkv(
config,
prefix=f"{prefix}.qkv",
weights=weights,
bias=False,
num_heads=self.num_heads,
num_key_value_heads=self.num_heads,
)
self.qkv.linear.bias = weights.get_sharded(f"{prefix}.qkv.bias", dim=0)
self.proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.proj",
weights=weights,
bias=True,
)
self.softmax_scale = 1.0 / np.sqrt(self.embed_dim // self.num_heads)
def forward(
self,
hidden_state: torch.Tensor,
cu_seqlens: torch.Tensor,
rotary_pos_emb: torch.Tensor,
max_seqlen: int,
) -> torch.Tensor:
# apply the qkv linear layer to the hidden state
qkv = self.qkv(hidden_state)
query, key, value = qkv.split(
[self.embed_dim, self.embed_dim, self.embed_dim], dim=1
)
# reshape the query, key, and value tensors
_shape = (
hidden_state.shape[0],
self.num_heads,
self.embed_dim // self.num_heads,
)
query = query.view(*_shape)
key = key.view(*_shape)
value = value.view(*_shape)
# apply rotary positional embeddings
query = apply_rotary_pos_emb_vision(query.unsqueeze(0), rotary_pos_emb).squeeze(
0
)
key = apply_rotary_pos_emb_vision(key.unsqueeze(0), rotary_pos_emb).squeeze(0)
# calc maximum sequence length for any batch
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
causal = False
# execute flash attention
if SYSTEM == "ipex":
attn_output = torch.empty_like(query)
ipex.llm.functional.varlen_attention(
(query.contiguous() if query.device.type == "xpu" else query),
(key.contiguous() if key.device.type == "xpu" else key),
(value.contiguous() if value.device.type == "xpu" else value),
attn_output,
cu_seqlens,
cu_seqlens,
max_seqlen,
max_seqlen,
0.0,
self.softmax_scale,
False,
causal,
False,
None,
)
else:
attn_output = flash_attn_2_cuda.varlen_fwd(
query,
key,
value,
None, # tmp buffer (auto-allocated)
cu_seqlens, # cu_seqlens_q
cu_seqlens, # cu_seqlens_k
None, # max_seqlen_q (auto-computed)
None, # max_seqlen_k (auto-computed)
None, # block_tables
None, # broadcast_mask
max_seqlen, # max_seqlen
max_seqlen, # max_seqlen
0.0, # dropout_p
self.softmax_scale,
False, # zero_tensors
causal, # causal attention within each sequence
-1, # window_size_left
-1, # window_size_right
0.0, # softmax_cap
False, # deterministic
None, # rng_state
)[0]
# reshape output to original dimensions
attn_output = attn_output.reshape(hidden_state.shape[0], -1)
attn_output = self.proj(attn_output)
return attn_output
class Qwen2VLVisionMLP(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = TensorParallelColumnLinear.load(
prefix=f"{prefix}.fc1", weights=weights, config=config, bias=True
)
self.fc2 = TensorParallelRowLinear.load(
prefix=f"{prefix}.fc2", weights=weights, config=config, bias=True
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class Qwen2VLVisionBlock(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.attn = Qwen2VLAttention(
prefix=f"{prefix}.attn",
config=config,
weights=weights,
)
self.norm1 = FastLayerNorm.load(
prefix=f"{prefix}.norm1",
weights=weights,
eps=1e-6,
)
self.norm2 = FastLayerNorm.load(
prefix=f"{prefix}.norm2",
weights=weights,
eps=1e-6,
)
self.mlp = Qwen2VLVisionMLP(
prefix=f"{prefix}.mlp",
config=config,
weights=weights,
)
def forward(
self, hidden_states, cu_seqlens, rotary_pos_emb, max_seqlen
) -> torch.Tensor:
norm1_out, residual = self.norm1(hidden_states)
attn_out = self.attn(norm1_out, cu_seqlens, rotary_pos_emb, max_seqlen)
hidden_states = attn_out + residual
norm2_out, residual = self.norm2(hidden_states)
hidden_states = hidden_states + self.mlp(norm2_out)
return hidden_states
class Qwen2VLPatchMerger(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.hidden_size = config.embed_dim * (config.spatial_merge_size**2)
self.patch_merger_ln_q = FastLayerNorm.load(
prefix=f"{prefix}.ln_q",
weights=weights,
eps=1e-6,
)
self.fc1 = TensorParallelColumnLinear.load(
prefix=f"{prefix}.mlp.0", weights=weights, config=config, bias=True
)
self.fc2 = TensorParallelRowLinear.load(
prefix=f"{prefix}.mlp.2", weights=weights, config=config, bias=True
)
def forward(self, hidden_states) -> torch.Tensor:
hidden_states, _ = self.patch_merger_ln_q(hidden_states)
hidden_states = hidden_states.view(-1, self.hidden_size)
hidden_states = self.fc1(hidden_states)
hidden_states = F.gelu(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class Qwen2VisionModel(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.spatial_merge_size = config.spatial_merge_size
kernel_size = [config.temporal_patch_size, config.patch_size, config.patch_size]
self.patch_embedding = nn.Conv3d(
in_channels=config.in_chans,
out_channels=config.embed_dim,
kernel_size=kernel_size,
stride=kernel_size,
bias=False,
)
self.patch_embedding.weight = nn.Parameter(
weights.get_tensor(f"{prefix}.patch_embed.proj.weight"), requires_grad=False
)
head_dim = config.embed_dim // config.num_heads
# TODO: replace with static positional embeddings once implemented
theta = 10000.0
dim = head_dim // 2
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.blocks = nn.ModuleList(
[
Qwen2VLVisionBlock(
prefix=f"{prefix}.blocks.{i}",
config=config,
weights=weights,
)
for i in range(config.depth)
]
)
self.merger = Qwen2VLPatchMerger(
prefix=f"{prefix}.merger",
config=config,
weights=weights,
)
self.temporal_patch_size = config.temporal_patch_size
self.spatial_patch_size = config.spatial_patch_size
self.in_channels = config.in_channels
self.embed_dim = config.embed_dim
def apply_class_embedding(self, hidden_state: torch.Tensor) -> torch.Tensor:
batch_size, _, hidden_size = hidden_state.shape
class_embedding = self.class_embedding.expand(batch_size, 1, hidden_size)
hidden_state = torch.cat([class_embedding, hidden_state], dim=1)
return hidden_state
def forward(
self,
pixel_values: torch.Tensor,
grid_thw: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
# reshape the input tensor for processing
shape = (
-1,
self.in_channels,
self.temporal_patch_size,
self.spatial_patch_size,
self.spatial_patch_size,
)
pixel_values = pixel_values.view(shape).to(self.patch_embedding.weight.dtype)
hidden_states = self.patch_embedding(pixel_values).view(-1, self.embed_dim)
# TODO: revisit to see if we can avoid some of these reshapes
# find the position ids for the input tensor based on the grid_thw
pos_ids = []
for t, h, w in grid_thw:
hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
hpos_ids = hpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
hpos_ids = hpos_ids.permute(0, 2, 1, 3)
hpos_ids = hpos_ids.flatten()
wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
wpos_ids = wpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
wpos_ids = wpos_ids.permute(0, 2, 1, 3)
wpos_ids = wpos_ids.flatten()
pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
pos_ids = torch.cat(pos_ids, dim=0)
max_grid_size = grid_thw[:, 1:].max()
# apply the positional embeddings to the position ids
seq = torch.arange(
max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype
)
rotary_pos_emb_full = torch.outer(seq, self.inv_freq)
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
rotary_pos_emb = rotary_pos_emb.to(hidden_states.device, hidden_states.dtype)
# create a cu_seqlens tensor to be used in the attention mask
cu_seqlens = torch.repeat_interleave(
grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]
).cumsum(dim=0, dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
max_seqlen = torch.max(cu_seqlens[1:] - cu_seqlens[:-1])
# iteratively apply the blocks to the hidden states
for block in self.blocks:
hidden_states = block(hidden_states, cu_seqlens, rotary_pos_emb, max_seqlen)
# apply the final patch merger to the hidden states
hidden_states = self.merger(hidden_states)
return hidden_states
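# Illustrative sketch (added commentary, not part of the original file): for a
# hypothetical grid_thw of [[1, 4, 6], [1, 2, 2]] (a 4x6 and a 2x2 patch grid, each
# with a single temporal patch), the per-image token counts are 24 and 4, so
#   cu_seqlens = [0, 24, 28] and max_seqlen = 24,
# and each vision block attends within [0, 24) and [24, 28) independently.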
class Qwen2VLForConditionalGeneration(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.config = config
config.vision_config.quantize = None
config.vision_config.speculator = config.speculator
# set rope_scaling.type == "mrope" since AutoConfig.from_pretrained incorrectly
# returns rope_scaling.type == "default" for Qwen2-VL model at the moment
config.rope_scaling.update({"rope_type": "mrope"})
self.hidden_size = config.hidden_size
self.vision_start_token_id = config.vision_start_token_id
self.vision_end_token_id = config.vision_end_token_id
self.image_token_id = config.image_token_id
self.video_token_id = config.video_token_id
self.spatial_merge_size = config.vision_config.spatial_merge_size
self.embed_tokens = TensorParallelEmbedding(
prefix="model.embed_tokens", weights=weights
)
self.visual = Qwen2VisionModel(
prefix="visual", config=config.vision_config, weights=weights
)
self.text_model = Qwen2Model(prefix=None, config=config, weights=weights)
if config.tie_word_embeddings:
suffix = "model.embed_tokens"
else:
suffix = "lm_head"
self.lm_head = SpeculativeHead.load(
config,
prefix=suffix if not prefix else f"{prefix}.{suffix}",
weights=weights,
)
self.norm = FastRMSNorm.load(
prefix="model.norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.device = weights.device
# based on https://github.com/huggingface/transformers/blob/e284c7e954abe12c34b50461c17f8115a0afe115/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py#L1391
# modified to first find segments then initialize position ids for each segment
# Steps:
# locate all vision and text segments
#  calculate `vision_segment_lengths` for each vision segment to be used as offset
# calculate `text_segment_lengths` for each text segment to be used as offset
# create position ids for each vision segment based on the image grid
# create position ids for each text segment
# combine all the position ids
# the final segment is the difference between the last vision segment and the end of the input
# combine all the position ids and reshape to (3, input_ids_len) then swap dimensions to (input_ids_len, 3)
def get_position_ids(
self,
input_ids: torch.Tensor,
image_grid_thw: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if image_grid_thw is None:
return (
torch.arange(input_ids.shape[0], device=input_ids.device)
.unsqueeze(1)
.repeat(1, 3)
)
spatial_merge_size = self.spatial_merge_size
vision_start_token_id = self.vision_start_token_id
vision_end_token_id = self.vision_end_token_id
device = input_ids.device
dtype = input_ids.dtype
input_ids_len = input_ids.shape[0]
vision_starts = torch.where(input_ids == vision_start_token_id)[0]
vision_ends = torch.where(input_ids == vision_end_token_id)[0]
vision_segments = torch.stack((vision_starts, vision_ends), dim=1)
prev_vision_end = torch.cat(
[torch.zeros(1, device=vision_ends.device, dtype=dtype), vision_ends[:-1]]
)
text_lengths_between_vision = vision_segments[:, 0] - prev_vision_end + 1
vision_widths_max = torch.cat(
[
torch.zeros(1, device=image_grid_thw.device, dtype=dtype),
image_grid_thw[:-1, 2] // spatial_merge_size,
]
)
vision_segment_lengths = vision_widths_max + text_lengths_between_vision
vision_segment_lengths = vision_segment_lengths.cumsum(dim=0)
text_segment_lengths = vision_segment_lengths - text_lengths_between_vision
# create position ids for each vision segment based on the image grid
llm_pos_ids_list = []
for i, _ in enumerate(vision_segments):
t, h, w = (
image_grid_thw[i][0],
image_grid_thw[i][1] // spatial_merge_size,
image_grid_thw[i][2] // spatial_merge_size,
)
t_indices = torch.arange(t, device=device).repeat_interleave(h * w)
h_indices = torch.arange(h, device=device).repeat_interleave(w).repeat(t)
w_indices = torch.arange(w, device=device).repeat(t * h)
image_position_ids = torch.stack([t_indices, h_indices, w_indices], dim=0)
# offset by the position of the last vision segment
im = image_position_ids + vision_segment_lengths[i]
llm_pos_ids_list.append(im)
# create position ids for each text segment
text_ranges = [
torch.arange(seq_len, device=device).view(1, -1).expand(3, -1)
+ text_segment_lengths[i]
for i, seq_len in enumerate(text_lengths_between_vision)
]
full_llm_pos_ids_list = [
item for sublist in zip(text_ranges, llm_pos_ids_list) for item in sublist
]
max_s = full_llm_pos_ids_list[-1].max() + 1
final_text_len = input_ids_len - vision_ends[-1]
if final_text_len > 0:
m = torch.arange(final_text_len, device=device).view(1, -1).expand(3, -1)
full_llm_pos_ids_list.append(m + max_s)
position_ids = (
torch.cat(full_llm_pos_ids_list, dim=1).reshape(3, -1).transpose(0, 1)
)
return position_ids
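# Illustrative sketch (added commentary, not part of the original file): with no
# image, get_position_ids degenerates to a plain arange repeated over the three
# mrope axes, e.g. for a 3-token prompt on a hypothetical `model` instance:
# >>> model.get_position_ids(torch.tensor([5, 6, 7]))
# tensor([[0, 0, 0],
#         [1, 1, 1],
#         [2, 2, 2]])
# With an image, the temporal/height/width axes diverge inside each vision segment
# and the text that follows resumes from the largest position used so far.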
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor],
pixel_values: torch.FloatTensor = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
pixel_attention_mask=None,
image_sizes: Optional[torch.LongTensor] = None,
adapter_data: Optional[torch.Tensor] = None,
cross_attention_states: Optional[torch.Tensor] = None,
image_indices=None,
):
inputs_embeds = self.embed_tokens(input_ids)
# apply the visual model to the pixel values if they are provided
if pixel_values is not None and len(pixel_values) > 0:
pixel_values = pixel_values.to(inputs_embeds.dtype)
if pixel_values is not None:
image_embeds = self.visual(
pixel_values, grid_thw=image_grid_thw
).squeeze(0)
inputs_embeds[input_ids == self.image_token_id] = image_embeds
hidden_states = self.text_model(
inputs_embeds=inputs_embeds,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
true_max_s=max_s,
prefill_cache_indices=prefill_cache_indices,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/qwen2_vl.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/qwen2_vl.py",
"repo_id": "text-generation-inference",
"token_count": 10169
} |
import torch
from PIL import Image
from io import BytesIO
from opentelemetry import trace
from typing import Iterable, Optional, Tuple, List, Type, Dict
from transformers import PreTrainedTokenizerBase
from transformers.image_processing_utils import select_best_resolution
from text_generation_server.pb import generate_pb2
from text_generation_server.models.flash_causal_lm import (
FlashCausalLMBatch,
FlashCausalLM,
)
from text_generation_server.models.globals import PREFIX_CACHING, ATTENTION
from loguru import logger
from text_generation_server.utils.log import log_master
from transformers import AutoProcessor
from text_generation_server.layers.attention import Seqlen
from text_generation_server.models.metadata_kernels import block_tables_to_ragged
tracer = trace.get_tracer(__name__)
IDEFICS2_FAKE_TOKEN = "<fake_token_around_image>"
IDEFICS2_IMAGE_TOKEN = "<image>"
IDEFICS3_IMAGE_TOKEN = "<image>"
IDEFICS3_FAKE_IMAGE_TOKEN = "<fake_token_around_image>"
IDEFICS3_GLOBAL_IMG_TOKEN = "<global-img>"
# copied from: https://github.com/huggingface/transformers/blob/02ed609285c2448b3b54c31e362f2c389fa952ab/src/transformers/models/idefics3/processing_idefics3.py#L44-L60
def _prompt_split_image(
*,
image_seq_len: int,
image_rows: int,
image_cols: int,
fake_token_around_image: str,
image_token: str,
global_img_token: str,
):
"""Prompt with expanded image tokens for when the image is split into patches."""
text_split_images = ""
for n_h in range(image_rows):
for n_w in range(image_cols):
text_split_images += (
f"{fake_token_around_image}"
+ f"<row_{n_h + 1}_col_{n_w + 1}>"
+ f"{image_token}" * image_seq_len
)
text_split_images += "\n"
text_split_images += (
f"\n{fake_token_around_image}"
+ f"{global_img_token}"
+ f"{image_token}" * image_seq_len
+ f"{fake_token_around_image}"
)
return text_split_images
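# Illustrative sketch (added commentary, not part of the original file): with
# image_seq_len=1, image_rows=1, image_cols=2 and the Idefics3 tokens defined above,
# the returned prompt concatenates to
#   "<fake_token_around_image><row_1_col_1><image>"
#   "<fake_token_around_image><row_1_col_2><image>\n"
#   "\n<fake_token_around_image><global-img><image><fake_token_around_image>"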
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
"""
Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
Args:
image_size (`tuple`):
The size of the input image in the format (height, width).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
tuple: The shape of the image patch grid in the format (width, height).
"""
if not isinstance(grid_pinpoints, list):
raise ValueError("grid_pinpoints should be a list of tuples or lists")
height, width = select_best_resolution(image_size, grid_pinpoints)
return height // patch_size, width // patch_size
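# Illustrative sketch (added commentary): if select_best_resolution picks (672, 672)
# for an image and patch_size is 336, the returned grid is (2, 2); a pick of
# (1008, 336) with the same patch size yields (3, 1).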
def image_text_replacement(processor, image_input, config, image_id: int) -> str:
if config.model_type == "idefics2":
image_seq_len = 64
image_str = f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_IMAGE_TOKEN * image_seq_len}{IDEFICS2_FAKE_TOKEN}"
if processor.image_processor.do_image_splitting:
image_str *= 5
return image_str
if config.model_type == "idefics3":
# TODO: implement this in a more general way
n_rows = image_input["rows"][0][image_id]
n_cols = image_input["cols"][0][image_id]
image_seq_len = int(
((config.vision_config.image_size // config.vision_config.patch_size) ** 2)
/ (config.scale_factor**2)
)
image_str = _prompt_split_image(
image_seq_len=image_seq_len,
image_rows=n_rows,
image_cols=n_cols,
fake_token_around_image=IDEFICS3_FAKE_IMAGE_TOKEN,
image_token=IDEFICS3_IMAGE_TOKEN,
global_img_token=IDEFICS3_GLOBAL_IMG_TOKEN,
)
return image_str
elif config.model_type == "llava_next":
height, width = image_input["image_sizes"][image_id]
num_features = get_number_of_features(height, width, config)
log_master(
logger.info,
f"Found {num_features} features in image of resolution {height}x{width}",
)
return "<image>" * num_features
elif config.model_type == "paligemma":
return "<image>" * config.text_config.num_image_tokens
elif config.model_type == "qwen2_vl":
grid_t, grid_h, grid_w = image_input["image_grid_thw"][image_id]
num_pads = grid_t * grid_h * grid_w // 4
padding = "<|image_pad|>" * num_pads
return f"<|vision_start|>{padding}<|vision_end|>"
else:
raise RuntimeError(f"Unknown config {config.model_type} for multimodal")
def image_text_replacement_fixup(config, text: str) -> str:
if config.model_type == "idefics2":
return text.replace(
f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_FAKE_TOKEN}", IDEFICS2_FAKE_TOKEN
)
return text
def get_unpadded_features(
original_height: int,
original_width: int,
npatches: int,
num_patch_height: int,
num_patch_width: int,
) -> Tuple[int, int]:
current_height = npatches * num_patch_height
current_width = npatches * num_patch_width
aspect_ratio: float = original_width / original_height
current_aspect_ratio: float = current_width / current_height
if aspect_ratio > current_aspect_ratio:
new_height = (original_height * current_width) // original_width
padding = (current_height - new_height) // 2
current_height = current_height - (2 * padding)
else:
new_width = (original_width * current_height) // original_height
padding = (current_width - new_width) // 2
current_width = current_width - (2 * padding)
unpadded_features = current_height * current_width
newline_features = current_height
return (unpadded_features, newline_features)
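# Illustrative sketch (added commentary, not part of the original file): for a
# hypothetical 300x900 (h x w) image with npatches=24, num_patch_height=1 and
# num_patch_width=2, the patch grid is 24x48 (aspect 2.0) while the image aspect is
# 3.0, so 2 * 4 rows of padding are removed and the height shrinks to 16:
#   unpadded_features = 16 * 48 = 768, newline_features = 16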
def get_number_of_features(height: int, width: int, config) -> int:
# From config
# Hardcoded for CLIP for now
# image_grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
image_grid_pinpoints = config.image_grid_pinpoints
image_size = config.vision_config.image_size
patch_size = config.vision_config.patch_size
assert image_size % patch_size == 0
npatches = image_size // patch_size
# Dimensions are intentionally swapped to be bug-compatible with
# upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59
num_patch_width, num_patch_height = get_anyres_image_grid_shape(
[height, width],
image_grid_pinpoints,
image_size,
)
unpadded_features, newline_features = get_unpadded_features(
height, width, npatches, num_patch_height, num_patch_width
)
# The base patch covers the entire image
base_features = npatches**2
return unpadded_features + newline_features + base_features
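# Illustrative sketch (added commentary): assuming a CLIP-style vision tower with
# image_size=336 and patch_size=14 (npatches=24, base_features=576) and the 300x900
# example above with a 1x2 patch grid, the total is
#   768 (unpadded) + 16 (newline) + 576 (base) = 1360 image tokens.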
class VlmCausalLMBatch(FlashCausalLMBatch):
pixel_values: Optional[List[torch.Tensor]]
pixel_attention_mask: Optional[List[torch.Tensor]]
image_sizes: Optional[List[Tuple[int, int]]]
image_grid_thw: Optional[torch.Tensor]
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches):
batch = super(VlmCausalLMBatch, cls).concatenate(batches)
batch.pixel_values = None
batch.pixel_attention_mask = None
batch.image_sizes = None
batch.image_grid_thw = None
return batch
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]):
batch = super().filter(request_ids)
batch.pixel_values = None
batch.pixel_attention_mask = None
batch.image_sizes = None
batch.image_grid_thw = None
return batch
@classmethod
def batch_tokenized_inputs(
cls, requests: Iterable[generate_pb2.Request], tokenizer, processor, config
):
# Process images first. We need all of them so that the processor
# can make the image splits the same size. And we need the final
# sizes to insert the correct number of image tokens.
images = []
for r in requests:
for chunk in r.input_chunks.chunks:
chunk_type = chunk.WhichOneof("chunk")
if chunk_type == "text":
pass
elif chunk_type == "image":
image = Image.open(BytesIO(chunk.image.data))
# qwen2_vl expects images to be larger than 20 pixels; this matters for warmup
# since the default warmup image is 20x20
if config.model_type == "qwen2_vl":
if image.width <= 20:
w = image.width * 2
h = image.height * 2
image = image.resize((w, h))
if config.model_type == "llava_next":
images.append(image)
else:
images.append([image])
else:
raise RuntimeError(f"Invalid chunk type {chunk_type}")
if images:
kwargs = {}
if (
hasattr(processor, "image_processor_class")
and processor.image_processor_class == "Idefics3ImageProcessor"
):
kwargs["return_row_col_info"] = True
image_inputs = processor.image_processor(
images, return_tensors="pt", **kwargs
)
else:
image_inputs = None
batch_tokenized_inputs = []
max_length = 0
image_id = 0
for r in requests:
full_text = ""
for chunk in r.input_chunks.chunks:
chunk_type = chunk.WhichOneof("chunk")
if chunk_type == "text":
full_text += chunk.text
elif chunk_type == "image":
full_text += image_text_replacement(
processor, image_inputs, config, image_id
)
image_id += 1
full_text = image_text_replacement_fixup(config, full_text)
input_ids = tokenizer(
full_text,
truncation=True,
max_length=r.truncate,
add_special_tokens=r.add_special_tokens,
)["input_ids"]
max_length = max(max_length, len(input_ids))
batch_tokenized_inputs.append(input_ids)
return batch_tokenized_inputs, image_inputs
@classmethod
def from_pb_processor(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
processor,
config,
dtype: torch.dtype,
device: torch.device,
) -> "VlmCausalLMBatch":
batch_tokenized_inputs, image_inputs = cls.batch_tokenized_inputs(
pb.requests, tokenizer, processor, config
)
batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
if image_inputs is not None:
batch.pixel_values = image_inputs["pixel_values"].to(device=device)
if "pixel_attention_mask" in image_inputs:
batch.pixel_attention_mask = image_inputs["pixel_attention_mask"].to(
device=device
)
else:
batch.pixel_attention_mask = None
if "image_sizes" in image_inputs:
batch.image_sizes = image_inputs["image_sizes"].to(device=device)
else:
batch.image_sizes = None
if "image_grid_thw" in image_inputs:
batch.image_grid_thw = image_inputs["image_grid_thw"].to(device=device)
else:
batch.image_grid_thw = None
else:
batch.pixel_values = None
batch.pixel_attention_mask = None
batch.image_sizes = None
batch.image_grid_thw = None
return batch
class VlmCausalLM(FlashCausalLM):
def __init__(
self,
model_id: str,
*,
processor_class=AutoProcessor,
processor_kwargs=None,
batch_class=VlmCausalLMBatch,
revision,
trust_remote_code: bool,
**kwargs,
):
if PREFIX_CACHING:
raise NotImplementedError("Vlm do not work with prefix caching yet")
if processor_kwargs is None:
processor_kwargs = {}
self.processor = processor_class.from_pretrained(
model_id,
revision=revision,
trust_remote_code=trust_remote_code,
**processor_kwargs,
)
self.batch_class = batch_class
super().__init__(
model_id=model_id,
revision=revision,
trust_remote_code=trust_remote_code,
# FIXME: VLM do not work with context chunking yet
support_chunking=False,
**kwargs,
)
@property
def batch_type(self) -> Type[VlmCausalLMBatch]:
return self.batch_class
def max_past(self) -> Optional[int]:
return getattr(self.model.text_model, "max_past", None)
def forward(
self,
batch: VlmCausalLMBatch,
adapter_data: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
# Model Forward
if batch.speculative_ids is not None:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
max_s = batch.max_current_length
lm_head_indices = batch.prefill_head_indices
speculative_ids = batch.speculative_ids
B, speculative_length = speculative_ids.shape
new_length = speculative_length + 1
new_input_ids = torch.cat(
[input_ids.unsqueeze(-1), speculative_ids], dim=1
).reshape(-1)
arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
arange_int = arange.to(dtype=torch.int32)
new_position_ids = (
position_ids.unsqueeze(-1).expand(B, new_length) + arange
).view(-1)
slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
input_lengths = (
input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int
).view(-1)
cache_lengths_tensor = (
batch.cache_lengths_tensor.unsqueeze(-1).expand(B, new_length)
).reshape(-1)
# Copy the block tables for all members
block_tables = (
block_tables.unsqueeze(1)
.expand(B, new_length, -1)
.reshape(B * new_length, -1)
.contiguous()
)
max_s = max_s + speculative_length
input_ids = new_input_ids
position_ids = new_position_ids
else:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
cache_lengths_tensor = batch.cache_lengths_tensor
max_s = batch.max_current_length
lm_head_indices = batch.prefill_head_indices
if self.model.config.model_type == "qwen2_vl":
if position_ids.dim() == 1 and batch.prefilling:
position_ids = self.model.get_position_ids(
input_ids, batch.image_grid_thw
)
batch.position_ids = position_ids
if cu_seqlen_prefill is None and self.max_past() is not None:
# In decode, not prefill, we're actually overwriting the KV-cache
# in a circular buffer mode.
# This makes sure the max_s for the decode pass is correct.
max_s = min(self.max_past(), max_s)
# Try to find an associated cuda graph
bs = input_ids.shape[0]
sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs])
if sorted_padded_bs:
# Get associated cuda graph
cuda_graph = self.cuda_graphs[sorted_padded_bs[0]]
else:
cuda_graph = None
if cu_seqlen_prefill is not None or cuda_graph is None:
if ATTENTION == "flashinfer":
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
cache_lengths=batch.cache_lengths,
input_lengths_tensor=batch.input_lengths_tensor,
cache_lengths_tensor=batch.cache_lengths_tensor,
max_current_length=batch.max_current_length,
)
with self._forward_context(
block_tables=block_tables,
cu_seqlen_prefill=cu_seqlen_prefill,
input_lengths_tensor=input_lengths,
cache_lengths_tensor=cache_lengths_tensor,
):
seqlen = Seqlen(
input_lengths=input_lengths,
cache_lengths=cache_lengths_tensor,
cu_seqlen_q=cu_seqlen_prefill,
max_q=batch.max_input_length,
max_k=batch.max_current_length,
)
logits, speculative_logits = self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
prefill_cache_indices=batch.prefill_cache_indices,
lm_head_indices=lm_head_indices,
pixel_values=batch.pixel_values,
pixel_attention_mask=batch.pixel_attention_mask,
image_sizes=batch.image_sizes,
image_grid_thw=batch.image_grid_thw,
)
if batch.prefill_cache_indices is not None:
batch.prefill_cache_indices = None
if batch.pixel_values is not None:
batch.pixel_values = None
if batch.pixel_attention_mask is not None:
batch.pixel_attention_mask = None
if batch.image_sizes is not None:
batch.image_sizes = None
if batch.image_grid_thw is not None:
batch.image_grid_thw = None
return logits, speculative_logits
# Copy inputs to the static inputs of the cuda graph
# Static inputs are potentially padded
cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids
cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids
if ATTENTION == "flashinfer":
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
cache_lengths=batch.cache_lengths,
input_lengths_tensor=batch.input_lengths_tensor,
cache_lengths_tensor=batch.cache_lengths_tensor,
max_current_length=batch.max_current_length,
)
cuda_graph["block_tables"][: block_tables.shape[0]] = block_tables
else:
cuda_graph["block_tables"][
: block_tables.shape[0], : block_tables.shape[1]
] = block_tables
# XXX: This is working only because block 0 is reserved for the healthcheck
# so it doesn't matter if we override it with bogus values.
cuda_graph["slots"].fill_(0)
cuda_graph["slots"][: slots.shape[0]] = slots
cuda_graph["input_lengths"].zero_()
cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths
cuda_graph["cache_lengths"].zero_()
cuda_graph["cache_lengths"][
: cache_lengths_tensor.shape[0]
] = cache_lengths_tensor
with self._forward_context(
block_tables=cuda_graph["block_tables"],
cu_seqlen_prefill=None,
input_lengths_tensor=cuda_graph["input_lengths"],
cache_lengths_tensor=cuda_graph["cache_lengths"],
state=cuda_graph["state"],
):
# Replay the graph
cuda_graph["graph"].replay()
# Slice output to the correct shape
speculative_logits = (
cuda_graph["speculative_logits"][:bs]
if cuda_graph["speculative_logits"] is not None
else None
)
logits = cuda_graph["logits"][:bs]
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/vlm_causal_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/vlm_causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 10374
} |
from typing import Optional
SUPPORT_CHUNKING: Optional[bool] = None
MAX_PREFILL_TOKENS: Optional[int] = None
def set_support_chunking(support_chunking: bool):
global SUPPORT_CHUNKING
SUPPORT_CHUNKING = support_chunking
def get_support_chunking() -> bool:
global SUPPORT_CHUNKING
return SUPPORT_CHUNKING
def set_max_prefill_tokens(max_prefill_tokens: int):
global MAX_PREFILL_TOKENS
MAX_PREFILL_TOKENS = max_prefill_tokens
def get_max_prefill_tokens() -> int:
global MAX_PREFILL_TOKENS
return MAX_PREFILL_TOKENS
| text-generation-inference/server/text_generation_server/utils/prefill_chunking.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/prefill_chunking.py",
"repo_id": "text-generation-inference",
"token_count": 221
} |
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors or IDEs
# http://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.md]
trim_trailing_whitespace = false
| tokenizers/bindings/node/.editorconfig/0 | {
"file_path": "tokenizers/bindings/node/.editorconfig",
"repo_id": "tokenizers",
"token_count": 108
} |
/* tslint:disable */
/* eslint-disable */
/* prettier-ignore */
/* auto-generated by NAPI-RS */
const { existsSync, readFileSync } = require('fs')
const { join } = require('path')
const { platform, arch } = process
let nativeBinding = null
let localFileExisted = false
let loadError = null
function isMusl() {
// For Node 10
if (!process.report || typeof process.report.getReport !== 'function') {
try {
const lddPath = require('child_process').execSync('which ldd').toString().trim()
return readFileSync(lddPath, 'utf8').includes('musl')
} catch (e) {
return true
}
} else {
const { glibcVersionRuntime } = process.report.getReport().header
return !glibcVersionRuntime
}
}
switch (platform) {
case 'android':
switch (arch) {
case 'arm64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.android-arm64.node')
} else {
nativeBinding = require('tokenizers-android-arm64')
}
} catch (e) {
loadError = e
}
break
case 'arm':
localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm-eabi.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.android-arm-eabi.node')
} else {
nativeBinding = require('tokenizers-android-arm-eabi')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on Android ${arch}`)
}
break
case 'win32':
switch (arch) {
case 'x64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-x64-msvc.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.win32-x64-msvc.node')
} else {
nativeBinding = require('tokenizers-win32-x64-msvc')
}
} catch (e) {
loadError = e
}
break
case 'ia32':
localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-ia32-msvc.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.win32-ia32-msvc.node')
} else {
nativeBinding = require('tokenizers-win32-ia32-msvc')
}
} catch (e) {
loadError = e
}
break
case 'arm64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-arm64-msvc.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.win32-arm64-msvc.node')
} else {
nativeBinding = require('tokenizers-win32-arm64-msvc')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on Windows: ${arch}`)
}
break
case 'darwin':
localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-universal.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.darwin-universal.node')
} else {
nativeBinding = require('tokenizers-darwin-universal')
}
break
} catch {}
switch (arch) {
case 'x64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-x64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.darwin-x64.node')
} else {
nativeBinding = require('tokenizers-darwin-x64')
}
} catch (e) {
loadError = e
}
break
case 'arm64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-arm64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.darwin-arm64.node')
} else {
nativeBinding = require('tokenizers-darwin-arm64')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on macOS: ${arch}`)
}
break
case 'freebsd':
if (arch !== 'x64') {
throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
}
localFileExisted = existsSync(join(__dirname, 'tokenizers.freebsd-x64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.freebsd-x64.node')
} else {
nativeBinding = require('tokenizers-freebsd-x64')
}
} catch (e) {
loadError = e
}
break
case 'linux':
switch (arch) {
case 'x64':
if (isMusl()) {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-x64-musl.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-x64-musl.node')
} else {
nativeBinding = require('tokenizers-linux-x64-musl')
}
} catch (e) {
loadError = e
}
} else {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-x64-gnu.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-x64-gnu.node')
} else {
nativeBinding = require('tokenizers-linux-x64-gnu')
}
} catch (e) {
loadError = e
}
}
break
case 'arm64':
if (isMusl()) {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-musl.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-arm64-musl.node')
} else {
nativeBinding = require('tokenizers-linux-arm64-musl')
}
} catch (e) {
loadError = e
}
} else {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-gnu.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-arm64-gnu.node')
} else {
nativeBinding = require('tokenizers-linux-arm64-gnu')
}
} catch (e) {
loadError = e
}
}
break
case 'arm':
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm-gnueabihf.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-arm-gnueabihf.node')
} else {
nativeBinding = require('tokenizers-linux-arm-gnueabihf')
}
} catch (e) {
loadError = e
}
break
case 'riscv64':
if (isMusl()) {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-riscv64-musl.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-riscv64-musl.node')
} else {
nativeBinding = require('tokenizers-linux-riscv64-musl')
}
} catch (e) {
loadError = e
}
} else {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-riscv64-gnu.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-riscv64-gnu.node')
} else {
nativeBinding = require('tokenizers-linux-riscv64-gnu')
}
} catch (e) {
loadError = e
}
}
break
case 's390x':
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-s390x-gnu.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-s390x-gnu.node')
} else {
nativeBinding = require('tokenizers-linux-s390x-gnu')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on Linux: ${arch}`)
}
break
default:
throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
}
if (!nativeBinding) {
if (loadError) {
throw loadError
}
throw new Error(`Failed to load native binding`)
}
const {
Decoder,
bpeDecoder,
byteFallbackDecoder,
ctcDecoder,
fuseDecoder,
metaspaceDecoder,
replaceDecoder,
sequenceDecoder,
stripDecoder,
wordPieceDecoder,
Encoding,
TruncationDirection,
TruncationStrategy,
Model,
BPE,
WordPiece,
WordLevel,
Unigram,
Normalizer,
prependNormalizer,
stripAccentsNormalizer,
bertNormalizer,
nfdNormalizer,
nfkdNormalizer,
nfcNormalizer,
nfkcNormalizer,
stripNormalizer,
sequenceNormalizer,
lowercase,
replace,
nmt,
precompiled,
JsSplitDelimiterBehavior,
PreTokenizer,
byteLevelPreTokenizer,
byteLevelAlphabet,
whitespacePreTokenizer,
whitespaceSplitPreTokenizer,
bertPreTokenizer,
metaspacePreTokenizer,
splitPreTokenizer,
punctuationPreTokenizer,
sequencePreTokenizer,
charDelimiterSplit,
digitsPreTokenizer,
Processor,
bertProcessing,
robertaProcessing,
byteLevelProcessing,
templateProcessing,
sequenceProcessing,
PaddingDirection,
AddedToken,
Tokenizer,
Trainer,
slice,
mergeEncodings,
} = nativeBinding
module.exports.Decoder = Decoder
module.exports.bpeDecoder = bpeDecoder
module.exports.byteFallbackDecoder = byteFallbackDecoder
module.exports.ctcDecoder = ctcDecoder
module.exports.fuseDecoder = fuseDecoder
module.exports.metaspaceDecoder = metaspaceDecoder
module.exports.replaceDecoder = replaceDecoder
module.exports.sequenceDecoder = sequenceDecoder
module.exports.stripDecoder = stripDecoder
module.exports.wordPieceDecoder = wordPieceDecoder
module.exports.Encoding = Encoding
module.exports.TruncationDirection = TruncationDirection
module.exports.TruncationStrategy = TruncationStrategy
module.exports.Model = Model
module.exports.BPE = BPE
module.exports.WordPiece = WordPiece
module.exports.WordLevel = WordLevel
module.exports.Unigram = Unigram
module.exports.Normalizer = Normalizer
module.exports.prependNormalizer = prependNormalizer
module.exports.stripAccentsNormalizer = stripAccentsNormalizer
module.exports.bertNormalizer = bertNormalizer
module.exports.nfdNormalizer = nfdNormalizer
module.exports.nfkdNormalizer = nfkdNormalizer
module.exports.nfcNormalizer = nfcNormalizer
module.exports.nfkcNormalizer = nfkcNormalizer
module.exports.stripNormalizer = stripNormalizer
module.exports.sequenceNormalizer = sequenceNormalizer
module.exports.lowercase = lowercase
module.exports.replace = replace
module.exports.nmt = nmt
module.exports.precompiled = precompiled
module.exports.JsSplitDelimiterBehavior = JsSplitDelimiterBehavior
module.exports.PreTokenizer = PreTokenizer
module.exports.byteLevelPreTokenizer = byteLevelPreTokenizer
module.exports.byteLevelAlphabet = byteLevelAlphabet
module.exports.whitespacePreTokenizer = whitespacePreTokenizer
module.exports.whitespaceSplitPreTokenizer = whitespaceSplitPreTokenizer
module.exports.bertPreTokenizer = bertPreTokenizer
module.exports.metaspacePreTokenizer = metaspacePreTokenizer
module.exports.splitPreTokenizer = splitPreTokenizer
module.exports.punctuationPreTokenizer = punctuationPreTokenizer
module.exports.sequencePreTokenizer = sequencePreTokenizer
module.exports.charDelimiterSplit = charDelimiterSplit
module.exports.digitsPreTokenizer = digitsPreTokenizer
module.exports.Processor = Processor
module.exports.bertProcessing = bertProcessing
module.exports.robertaProcessing = robertaProcessing
module.exports.byteLevelProcessing = byteLevelProcessing
module.exports.templateProcessing = templateProcessing
module.exports.sequenceProcessing = sequenceProcessing
module.exports.PaddingDirection = PaddingDirection
module.exports.AddedToken = AddedToken
module.exports.Tokenizer = Tokenizer
module.exports.Trainer = Trainer
module.exports.slice = slice
module.exports.mergeEncodings = mergeEncodings
| tokenizers/bindings/node/index.js/0 | {
"file_path": "tokenizers/bindings/node/index.js",
"repo_id": "tokenizers",
"token_count": 5374
} |
{
"name": "tokenizers-linux-x64-musl",
"version": "0.13.4-rc1",
"os": [
"linux"
],
"cpu": [
"x64"
],
"main": "tokenizers.linux-x64-musl.node",
"files": [
"tokenizers.linux-x64-musl.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers",
"libc": [
"musl"
]
} | tokenizers/bindings/node/npm/linux-x64-musl/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/linux-x64-musl/package.json",
"repo_id": "tokenizers",
"token_count": 291
} |
use crate::arc_rwlock_serde;
use serde::{Deserialize, Serialize};
extern crate tokenizers as tk;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use std::sync::{Arc, RwLock};
use tk::processors::PostProcessorWrapper;
use tk::Encoding;
#[derive(Clone, Serialize, Deserialize)]
#[napi]
pub struct Processor {
#[serde(flatten, with = "arc_rwlock_serde")]
processor: Option<Arc<RwLock<PostProcessorWrapper>>>,
}
impl tk::PostProcessor for Processor {
fn added_tokens(&self, is_pair: bool) -> usize {
self
.processor
.as_ref()
.expect("Uninitialized PostProcessor")
.read()
.unwrap()
.added_tokens(is_pair)
}
fn process_encodings(
&self,
encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> tk::Result<Vec<Encoding>> {
self
.processor
.as_ref()
.ok_or("Uninitialized PostProcessor")?
.read()
.unwrap()
.process_encodings(encodings, add_special_tokens)
}
}
#[napi]
pub fn bert_processing(sep: (String, u32), cls: (String, u32)) -> Result<Processor> {
Ok(Processor {
processor: Some(Arc::new(RwLock::new(
tk::processors::bert::BertProcessing::new(sep, cls).into(),
))),
})
}
#[napi]
pub fn roberta_processing(
sep: (String, u32),
cls: (String, u32),
trim_offsets: Option<bool>,
add_prefix_space: Option<bool>,
) -> Result<Processor> {
let trim_offsets = trim_offsets.unwrap_or(true);
let add_prefix_space = add_prefix_space.unwrap_or(true);
let mut processor = tk::processors::roberta::RobertaProcessing::new(sep, cls);
processor = processor.trim_offsets(trim_offsets);
processor = processor.add_prefix_space(add_prefix_space);
Ok(Processor {
processor: Some(Arc::new(RwLock::new(processor.into()))),
})
}
#[napi]
pub fn byte_level_processing(trim_offsets: Option<bool>) -> Result<Processor> {
let mut byte_level = tk::processors::byte_level::ByteLevel::default();
if let Some(trim_offsets) = trim_offsets {
byte_level = byte_level.trim_offsets(trim_offsets);
}
Ok(Processor {
processor: Some(Arc::new(RwLock::new(byte_level.into()))),
})
}
#[napi]
pub fn template_processing(
single: String,
pair: Option<String>,
special_tokens: Option<Vec<(String, u32)>>,
) -> Result<Processor> {
let special_tokens = special_tokens.unwrap_or_default();
let mut builder = tk::processors::template::TemplateProcessing::builder();
builder.try_single(single).map_err(Error::from_reason)?;
builder.special_tokens(special_tokens);
if let Some(pair) = pair {
builder.try_pair(pair).map_err(Error::from_reason)?;
}
let processor = builder
.build()
.map_err(|e| Error::from_reason(e.to_string()))?;
Ok(Processor {
processor: Some(Arc::new(RwLock::new(processor.into()))),
})
}
#[napi]
pub fn sequence_processing(processors: Vec<&Processor>) -> Processor {
let sequence: Vec<tk::PostProcessorWrapper> = processors
.into_iter()
.filter_map(|processor| {
processor
.processor
.as_ref()
.map(|processor| (**processor).read().unwrap().clone())
})
.clone()
.collect();
Processor {
processor: Some(Arc::new(RwLock::new(PostProcessorWrapper::Sequence(
tk::processors::sequence::Sequence::new(sequence),
)))),
}
}
| tokenizers/bindings/node/src/processors.rs/0 | {
"file_path": "tokenizers/bindings/node/src/processors.rs",
"repo_id": "tokenizers",
"token_count": 1336
} |
<p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<a href="https://badge.fury.io/py/tokenizers">
<img alt="Build" src="https://badge.fury.io/py/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
# Tokenizers
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the High-level design, you can go check it there.
Otherwise, let's dive in!
## Main features:
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignment tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
### Installation
#### With pip:
```bash
pip install tokenizers
```
#### From sources:
To use this method, you need to have the Rust installed:
```bash
# Install with:
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
```
Once Rust is installed, you can compile by doing the following:
```bash
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python
# Create a virtual env (you can use yours as well)
python -m venv .env
source .env/bin/activate
# Install `tokenizers` in the current virtual env
pip install -e .
```
### Load a pretrained tokenizer from the Hub
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
```
### Using the provided Tokenizers
We provide some pre-built tokenizers to cover the most common cases. You can easily load one of
these using some `vocab.json` and `merges.txt` files:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
vocab = "./path/to/vocab.json"
merges = "./path/to/merges.txt"
tokenizer = CharBPETokenizer(vocab, merges)
# And then encode:
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.ids)
print(encoded.tokens)
```
And you can train them just as simply:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
tokenizer = CharBPETokenizer()
# Then train it!
tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ])
# Now, let's use it:
encoded = tokenizer.encode("I can feel the magic, can you?")
# And finally save it somewhere
tokenizer.save("./path/to/directory/my-bpe.tokenizer.json")
```
#### Provided Tokenizers
- `CharBPETokenizer`: The original BPE
- `ByteLevelBPETokenizer`: The byte level version of the BPE
- `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
- `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece
All of these can be used and trained as explained above!
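For example, here is a minimal sketch using the `BertWordPieceTokenizer` (the vocabulary path below is only a placeholder for a WordPiece `vocab.txt` file):
```python
from tokenizers import BertWordPieceTokenizer
# Load an existing WordPiece vocabulary (e.g. the one shipped with a BERT checkpoint)
tokenizer = BertWordPieceTokenizer("./path/to/bert-vocab.txt", lowercase=True)
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.tokens)
```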
### Build your own
Whenever the provided tokenizers don't give you enough freedom, you can build your own tokenizer
by putting all the different parts you need together.
You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
#### Building a byte-level BPE
Here is an example showing how to build your own byte-level BPE by putting all the different pieces
together, and then saving it to a single file:
```python
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
# Initialize a tokenizer
tokenizer = Tokenizer(models.BPE())
# Customize pre-tokenization and decoding
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
# And then train
trainer = trainers.BpeTrainer(
vocab_size=20000,
min_frequency=2,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
)
tokenizer.train([
"./path/to/dataset/1.txt",
"./path/to/dataset/2.txt",
"./path/to/dataset/3.txt"
], trainer=trainer)
# And Save it
tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True)
```
Now, when you want to use this tokenizer, this is as simple as:
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json")
encoded = tokenizer.encode("I can feel the magic, can you?")
```
| tokenizers/bindings/python/README.md/0 | {
"file_path": "tokenizers/bindings/python/README.md",
"repo_id": "tokenizers",
"token_count": 1621
} |
from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class ByteLevelBPETokenizer(BaseTokenizer):
"""ByteLevelBPETokenizer
Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
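    Example (a minimal sketch; the training file path is a placeholder)::

        tokenizer = ByteLevelBPETokenizer()
        tokenizer.train(["./path/to/data.txt"], vocab_size=30000)
        encoded = tokenizer.encode("I can feel the magic, can you?")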
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
add_prefix_space: bool = False,
lowercase: bool = False,
dropout: Optional[float] = None,
unicode_normalizer: Optional[str] = None,
continuing_subword_prefix: Optional[str] = None,
end_of_word_suffix: Optional[str] = None,
trim_offsets: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
continuing_subword_prefix=continuing_subword_prefix or "",
end_of_word_suffix=end_of_word_suffix or "",
)
)
else:
tokenizer = Tokenizer(BPE())
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
parameters = {
"model": "ByteLevelBPE",
"add_prefix_space": add_prefix_space,
"lowercase": lowercase,
"dropout": dropout,
"unicode_normalizer": unicode_normalizer,
"continuing_subword_prefix": continuing_subword_prefix,
"end_of_word_suffix": end_of_word_suffix,
"trim_offsets": trim_offsets,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return ByteLevelBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
| tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py",
"repo_id": "tokenizers",
"token_count": 1978
} |
# Generated content DO NOT EDIT
class Trainer:
"""
Base class for all trainers
This class is not supposed to be instantiated directly. Instead, any implementation of a
Trainer will return an instance of this class when instantiated.
"""
class BpeTrainer(Trainer):
"""
Trainer capable of training a BPE model
Args:
vocab_size (:obj:`int`, `optional`):
The size of the final vocabulary, including all tokens and alphabet.
min_frequency (:obj:`int`, `optional`):
The minimum frequency a pair should have in order to be merged.
show_progress (:obj:`bool`, `optional`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
limit_alphabet (:obj:`int`, `optional`):
The maximum different characters to keep in the alphabet.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
continuing_subword_prefix (:obj:`str`, `optional`):
A prefix to be used for every subword that is not a beginning-of-word.
end_of_word_suffix (:obj:`str`, `optional`):
A suffix to be used for every subword that is an end-of-word.
max_token_length (:obj:`int`, `optional`):
Prevents creating tokens longer than the specified size.
This can help with reducing the pollution of your vocabulary with
highly repetitive tokens like `======` for Wikipedia
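    Example (a minimal sketch; the training texts are placeholders)::

        from tokenizers import Tokenizer, models, trainers

        tokenizer = Tokenizer(models.BPE(unk_token="[UNK]"))
        trainer = trainers.BpeTrainer(vocab_size=30000, special_tokens=["[UNK]"])
        tokenizer.train_from_iterator(["some text", "some more text"], trainer=trainer)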
"""
class UnigramTrainer(Trainer):
"""
Trainer capable of training a Unigram model
Args:
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
shrinking_factor (:obj:`float`):
The shrinking factor used at each step of the training to prune the
vocabulary.
unk_token (:obj:`str`):
The token used for out-of-vocabulary tokens.
max_piece_length (:obj:`int`):
The maximum length of a given token.
n_sub_iterations (:obj:`int`):
The number of iterations of the EM algorithm to perform before
pruning the vocabulary.
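    Example (a minimal sketch; the training texts are placeholders)::

        from tokenizers import Tokenizer, models, trainers

        tokenizer = Tokenizer(models.Unigram())
        trainer = trainers.UnigramTrainer(
            vocab_size=8000, unk_token="<unk>", special_tokens=["<unk>"]
        )
        tokenizer.train_from_iterator(["some text", "some more text"], trainer=trainer)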
"""
def __init__(
self,
vocab_size=8000,
show_progress=True,
special_tokens=[],
shrinking_factor=0.75,
unk_token=None,
max_piece_length=16,
n_sub_iterations=2,
):
pass
class WordLevelTrainer(Trainer):
"""
Trainer capable of training a WordLevel model
Args:
vocab_size (:obj:`int`, `optional`):
The size of the final vocabulary, including all tokens and alphabet.
min_frequency (:obj:`int`, `optional`):
The minimum frequency a pair should have in order to be merged.
show_progress (:obj:`bool`, `optional`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`):
A list of special tokens the model should know of.
"""
class WordPieceTrainer(Trainer):
"""
Trainer capable of training a WordPiece model
Args:
vocab_size (:obj:`int`, `optional`):
The size of the final vocabulary, including all tokens and alphabet.
min_frequency (:obj:`int`, `optional`):
The minimum frequency a pair should have in order to be merged.
show_progress (:obj:`bool`, `optional`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
limit_alphabet (:obj:`int`, `optional`):
The maximum different characters to keep in the alphabet.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
continuing_subword_prefix (:obj:`str`, `optional`):
A prefix to be used for every subword that is not a beginning-of-word.
end_of_word_suffix (:obj:`str`, `optional`):
A suffix to be used for every subword that is an end-of-word.
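    Example (a minimal sketch; the training file path is a placeholder)::

        from tokenizers import Tokenizer, models, trainers

        tokenizer = Tokenizer(models.WordPiece(unk_token="[UNK]"))
        trainer = trainers.WordPieceTrainer(
            vocab_size=30000, special_tokens=["[UNK]", "[CLS]", "[SEP]"]
        )
        tokenizer.train(["./path/to/data.txt"], trainer=trainer)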
"""
def __init__(
self,
vocab_size=30000,
min_frequency=0,
show_progress=True,
special_tokens=[],
limit_alphabet=None,
initial_alphabet=[],
continuing_subword_prefix="##",
end_of_word_suffix=None,
):
pass
| tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi",
"repo_id": "tokenizers",
"token_count": 2178
} |
use serde::Serialize;
use std::collections::{hash_map::DefaultHasher, HashMap};
use std::hash::{Hash, Hasher};
use numpy::{npyffi, PyArray1, PyArrayMethods};
use pyo3::class::basic::CompareOp;
use pyo3::exceptions;
use pyo3::intern;
use pyo3::prelude::*;
use pyo3::types::*;
use tk::models::bpe::BPE;
use tk::tokenizer::{
Model, PaddingDirection, PaddingParams, PaddingStrategy, PostProcessor, TokenizerImpl,
TruncationDirection, TruncationParams, TruncationStrategy,
};
use tk::utils::iter::ResultShunt;
use tokenizers as tk;
use super::decoders::PyDecoder;
use super::encoding::PyEncoding;
use super::error::{PyError, ToPyResult};
use super::models::PyModel;
use super::normalizers::PyNormalizer;
use super::pre_tokenizers::PyPreTokenizer;
use super::trainers::PyTrainer;
use crate::processors::PyPostProcessor;
use crate::utils::{MaybeSizedIterator, PyBufferedIterator};
use std::collections::BTreeMap;
/// Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
/// It can have special options that define the way it should behave.
///
/// Args:
/// content (:obj:`str`): The content of the token
///
/// single_word (:obj:`bool`, defaults to :obj:`False`):
/// Defines whether this token should only match single words. If :obj:`True`, this
/// token will never match inside of a word. For example the token ``ing`` would match
/// on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
/// The notion of "`inside of a word`" is defined by the word boundaries pattern in
/// regular expressions (ie. the token should start and end with word boundaries).
///
/// lstrip (:obj:`bool`, defaults to :obj:`False`):
/// Defines whether this token should strip all potential whitespaces on its left side.
/// If :obj:`True`, this token will greedily match any whitespace on its left. For
/// example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
/// ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
///
/// rstrip (:obj:`bool`, defaults to :obj:`False`):
/// Defines whether this token should strip all potential whitespaces on its right
/// side. If :obj:`True`, this token will greedily match any whitespace on its right.
/// It works just like :obj:`lstrip` but on the right.
///
/// normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
/// Defines whether this token should match against the normalized version of the input
/// text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
/// lowercasing the text, the token could be extracted from the input ``"I saw a lion
/// Yesterday"``.
/// special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
/// Defines whether this token should be skipped when decoding.
///
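/// Example:
///     A minimal sketch of creating such a token from Python (the ``[MASK]`` content
///     and the chosen options below are only illustrative)::
///
///         from tokenizers import AddedToken
///         mask = AddedToken("[MASK]", lstrip=True, special=True)
///         # e.g. tokenizer.add_special_tokens([mask])
///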
#[pyclass(dict, module = "tokenizers", name = "AddedToken")]
pub struct PyAddedToken {
pub content: String,
pub special: bool,
pub single_word: Option<bool>,
pub lstrip: Option<bool>,
pub rstrip: Option<bool>,
pub normalized: Option<bool>,
}
impl PyAddedToken {
pub fn from<S: Into<String>>(content: S, special: Option<bool>) -> Self {
Self {
content: content.into(),
special: special.unwrap_or(false),
single_word: None,
lstrip: None,
rstrip: None,
normalized: None,
}
}
pub fn get_token(&self) -> tk::tokenizer::AddedToken {
let mut token = tk::AddedToken::from(&self.content, self.special);
if let Some(sw) = self.single_word {
token = token.single_word(sw);
}
if let Some(ls) = self.lstrip {
token = token.lstrip(ls);
}
if let Some(rs) = self.rstrip {
token = token.rstrip(rs);
}
if let Some(n) = self.normalized {
token = token.normalized(n);
}
token
}
pub fn as_pydict<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyDict>> {
let dict = PyDict::new(py);
let token = self.get_token();
dict.set_item("content", token.content)?;
dict.set_item("single_word", token.single_word)?;
dict.set_item("lstrip", token.lstrip)?;
dict.set_item("rstrip", token.rstrip)?;
dict.set_item("normalized", token.normalized)?;
dict.set_item("special", token.special)?;
Ok(dict)
}
}
impl From<tk::AddedToken> for PyAddedToken {
fn from(token: tk::AddedToken) -> Self {
Self {
content: token.content,
single_word: Some(token.single_word),
lstrip: Some(token.lstrip),
rstrip: Some(token.rstrip),
normalized: Some(token.normalized),
special: token.special,
}
}
}
#[pymethods]
impl PyAddedToken {
#[new]
#[pyo3(signature = (content=None, **kwargs), text_signature = "(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False)")]
fn __new__(content: Option<&str>, kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<Self> {
let mut token = PyAddedToken::from(content.unwrap_or(""), None);
if let Some(kwargs) = kwargs {
for (key, value) in kwargs {
let key: String = key.extract()?;
match key.as_ref() {
"single_word" => token.single_word = Some(value.extract()?),
"lstrip" => token.lstrip = Some(value.extract()?),
"rstrip" => token.rstrip = Some(value.extract()?),
"normalized" => token.normalized = Some(value.extract()?),
"special" => token.special = value.extract()?,
_ => println!("Ignored unknown kwarg option {}", key),
}
}
}
Ok(token)
}
fn __getstate__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyDict>> {
self.as_pydict(py)
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.downcast_bound::<PyDict>(py) {
Ok(state) => {
for (key, value) in state {
let key: String = key.extract()?;
match key.as_ref() {
"content" => self.content = value.extract()?,
"single_word" => self.single_word = Some(value.extract()?),
"lstrip" => self.lstrip = Some(value.extract()?),
"rstrip" => self.rstrip = Some(value.extract()?),
"normalized" => self.normalized = Some(value.extract()?),
"special" => self.special = value.extract()?,
_ => {}
}
}
Ok(())
}
Err(e) => Err(e.into()),
}
}
/// Get the content of this :obj:`AddedToken`
#[getter]
fn get_content(&self) -> &str {
&self.content
}
/// Set the content of this :obj:`AddedToken`
#[setter]
fn set_content(&mut self, content: String) {
self.content = content;
}
/// Get the value of the :obj:`rstrip` option
#[getter]
fn get_rstrip(&self) -> bool {
self.get_token().rstrip
}
/// Get the value of the :obj:`lstrip` option
#[getter]
fn get_lstrip(&self) -> bool {
self.get_token().lstrip
}
/// Get the value of the :obj:`single_word` option
#[getter]
fn get_single_word(&self) -> bool {
self.get_token().single_word
}
/// Get the value of the :obj:`normalized` option
#[getter]
fn get_normalized(&self) -> bool {
self.get_token().normalized
}
/// Get the value of the :obj:`special` option
#[getter]
fn get_special(&self) -> bool {
self.get_token().special
}
/// Set the value of the :obj:`special` option
#[setter]
fn set_special(&mut self, special: bool) {
self.special = special;
}
fn __str__(&self) -> PyResult<&str> {
Ok(&self.content)
}
fn __repr__(&self) -> PyResult<String> {
let bool_to_python = |p| match p {
true => "True",
false => "False",
};
let token = self.get_token();
Ok(format!(
"AddedToken(\"{}\", rstrip={}, lstrip={}, single_word={}, normalized={}, special={})",
self.content,
bool_to_python(token.rstrip),
bool_to_python(token.lstrip),
bool_to_python(token.single_word),
bool_to_python(token.normalized),
bool_to_python(token.special)
))
}
fn __richcmp__(&self, other: Py<PyAddedToken>, op: CompareOp) -> bool {
use CompareOp::*;
Python::with_gil(|py| match op {
Lt | Le | Gt | Ge => false,
Eq => self.get_token() == other.borrow(py).get_token(),
Ne => self.get_token() != other.borrow(py).get_token(),
})
}
fn __hash__(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.get_token().hash(&mut hasher);
hasher.finish()
}
}
struct TextInputSequence<'s>(tk::InputSequence<'s>);
impl<'s> FromPyObject<'s> for TextInputSequence<'s> {
fn extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> {
let err = exceptions::PyTypeError::new_err("TextInputSequence must be str");
if let Ok(s) = ob.extract::<String>() {
Ok(Self(s.into()))
} else {
Err(err)
}
}
}
impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> {
fn from(s: TextInputSequence<'s>) -> Self {
s.0
}
}
struct PyArrayUnicode(Vec<String>);
impl FromPyObject<'_> for PyArrayUnicode {
fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> {
// SAFETY Making sure the pointer is a valid numpy array requires calling numpy C code
if unsafe { npyffi::PyArray_Check(ob.py(), ob.as_ptr()) } == 0 {
return Err(exceptions::PyTypeError::new_err("Expected an np.array"));
}
let arr = ob.as_ptr() as *mut npyffi::PyArrayObject;
// SAFETY Getting all the metadata about the numpy array to check its sanity
let (type_num, elsize, _alignment, data, nd, flags) = unsafe {
let desc = (*arr).descr;
(
(*desc).type_num,
npyffi::PyDataType_ELSIZE(ob.py(), desc) as usize,
npyffi::PyDataType_ALIGNMENT(ob.py(), desc) as usize,
(*arr).data,
(*arr).nd,
(*arr).flags,
)
};
if nd != 1 {
return Err(exceptions::PyTypeError::new_err(
"Expected a 1 dimensional np.array",
));
}
if flags & (npyffi::NPY_ARRAY_C_CONTIGUOUS | npyffi::NPY_ARRAY_F_CONTIGUOUS) == 0 {
return Err(exceptions::PyTypeError::new_err(
"Expected a contiguous np.array",
));
}
if type_num != npyffi::types::NPY_TYPES::NPY_UNICODE as i32 {
return Err(exceptions::PyTypeError::new_err(
"Expected a np.array[dtype='U']",
));
}
// SAFETY Looking at the raw numpy data to create new owned Rust strings via copies (so it's safe afterwards).
unsafe {
let n_elem = *(*arr).dimensions as usize;
let all_bytes = std::slice::from_raw_parts(data as *const u8, elsize * n_elem);
let seq = (0..n_elem)
.map(|i| {
let bytes = &all_bytes[i * elsize..(i + 1) * elsize];
Ok(std::str::from_utf8(bytes)?.to_owned())
// let unicode = pyo3::ffi::PyUnicode_FromKindAndData(
// pyo3::ffi::PyUnicode_4BYTE_KIND as _,
// bytes.as_ptr() as *const _,
// elsize as isize / alignment as isize,
// );
// let py = ob.py();
// let obj = PyObject::from_owned_ptr(py, unicode);
// let s = obj.downcast_bound::<PyString>(py)?;
// Ok(s.to_string_lossy().trim_matches(char::from(0)).to_owned())
})
.collect::<PyResult<Vec<_>>>()?;
Ok(Self(seq))
}
}
}
impl From<PyArrayUnicode> for tk::InputSequence<'_> {
fn from(s: PyArrayUnicode) -> Self {
s.0.into()
}
}
struct PyArrayStr(Vec<String>);
impl FromPyObject<'_> for PyArrayStr {
fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> {
let array = ob.downcast::<PyArray1<PyObject>>()?;
let seq = array
.readonly()
.as_array()
.iter()
.map(|obj| {
let s = obj.downcast_bound::<PyString>(ob.py())?;
Ok(s.to_string_lossy().into_owned())
})
.collect::<PyResult<Vec<_>>>()?;
Ok(Self(seq))
}
}
impl From<PyArrayStr> for tk::InputSequence<'_> {
fn from(s: PyArrayStr) -> Self {
s.0.into()
}
}
struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>);
impl<'s> FromPyObject<'s> for PreTokenizedInputSequence<'s> {
fn extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> {
if let Ok(seq) = ob.extract::<PyArrayUnicode>() {
return Ok(Self(seq.into()));
}
if let Ok(seq) = ob.extract::<PyArrayStr>() {
return Ok(Self(seq.into()));
}
if let Ok(s) = ob.downcast::<PyList>() {
if let Ok(seq) = s.extract::<Vec<String>>() {
return Ok(Self(seq.into()));
}
}
if let Ok(s) = ob.downcast::<PyTuple>() {
if let Ok(seq) = s.extract::<Vec<String>>() {
return Ok(Self(seq.into()));
}
}
Err(exceptions::PyTypeError::new_err(
"PreTokenizedInputSequence must be Union[List[str], Tuple[str]]",
))
}
}
impl<'s> From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> {
fn from(s: PreTokenizedInputSequence<'s>) -> Self {
s.0
}
}
struct TextEncodeInput<'s>(tk::EncodeInput<'s>);
impl<'s> FromPyObject<'s> for TextEncodeInput<'s> {
fn extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> {
if let Ok(i) = ob.extract::<TextInputSequence>() {
return Ok(Self(i.into()));
}
if let Ok((i1, i2)) = ob.extract::<(TextInputSequence, TextInputSequence)>() {
return Ok(Self((i1, i2).into()));
}
if let Ok(arr) = ob.extract::<Vec<Bound<PyAny>>>() {
if arr.len() == 2 {
let first = arr[0].extract::<TextInputSequence>()?;
let second = arr[1].extract::<TextInputSequence>()?;
return Ok(Self((first, second).into()));
}
}
Err(exceptions::PyTypeError::new_err(
"TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]",
))
}
}
impl<'s> From<TextEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> {
fn from(i: TextEncodeInput<'s>) -> Self {
i.0
}
}
struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>);
impl<'s> FromPyObject<'s> for PreTokenizedEncodeInput<'s> {
fn extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> {
if let Ok(i) = ob.extract::<PreTokenizedInputSequence>() {
return Ok(Self(i.into()));
}
if let Ok((i1, i2)) = ob.extract::<(PreTokenizedInputSequence, PreTokenizedInputSequence)>()
{
return Ok(Self((i1, i2).into()));
}
if let Ok(arr) = ob.extract::<Vec<Bound<PyAny>>>() {
if arr.len() == 2 {
let first = arr[0].extract::<PreTokenizedInputSequence>()?;
let second = arr[1].extract::<PreTokenizedInputSequence>()?;
return Ok(Self((first, second).into()));
}
}
Err(exceptions::PyTypeError::new_err(
"PreTokenizedEncodeInput must be Union[PreTokenizedInputSequence, \
Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence]]",
))
}
}
impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> {
fn from(i: PreTokenizedEncodeInput<'s>) -> Self {
i.0
}
}
type Tokenizer = TokenizerImpl<PyModel, PyNormalizer, PyPreTokenizer, PyPostProcessor, PyDecoder>;
/// A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
/// and outputs an :class:`~tokenizers.Encoding`.
///
/// Args:
/// model (:class:`~tokenizers.models.Model`):
/// The core algorithm that this :obj:`Tokenizer` should be using.
///
#[pyclass(dict, module = "tokenizers", name = "Tokenizer")]
#[derive(Clone, Serialize)]
#[serde(transparent)]
pub struct PyTokenizer {
pub(crate) tokenizer: Tokenizer,
}
impl PyTokenizer {
fn new(tokenizer: Tokenizer) -> Self {
PyTokenizer { tokenizer }
}
fn from_model(model: PyModel) -> Self {
PyTokenizer::new(TokenizerImpl::new(model))
}
}
#[pymethods]
impl PyTokenizer {
#[new]
#[pyo3(text_signature = "(self, model)")]
fn __new__(model: PyRef<PyModel>) -> Self {
PyTokenizer::from_model(model.clone())
}
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.tokenizer).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle Tokenizer: {}",
e
))
})?;
Ok(PyBytes::new(py, data.as_bytes()).into())
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&[u8]>(py) {
Ok(s) => {
self.tokenizer = serde_json::from_slice(s).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle Tokenizer: {}",
e
))
})?;
Ok(())
}
Err(e) => Err(e),
}
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> {
let model: PyObject = PyModel::from(BPE::default())
.into_pyobject(py)?
.into_any()
.into();
PyTuple::new(py, vec![model])
}
/// Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
///
/// Args:
/// json (:obj:`str`):
/// A valid JSON string representing a previously serialized
/// :class:`~tokenizers.Tokenizer`
///
/// Returns:
/// :class:`~tokenizers.Tokenizer`: The new tokenizer
#[staticmethod]
#[pyo3(text_signature = "(json)")]
fn from_str(json: &str) -> PyResult<Self> {
let tokenizer: PyResult<_> = ToPyResult(json.parse()).into();
Ok(Self::new(tokenizer?))
}
/// Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
///
/// Args:
/// path (:obj:`str`):
/// A path to a local JSON file representing a previously serialized
/// :class:`~tokenizers.Tokenizer`
///
/// Returns:
/// :class:`~tokenizers.Tokenizer`: The new tokenizer
#[staticmethod]
#[pyo3(text_signature = "(path)")]
fn from_file(path: &str) -> PyResult<Self> {
let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into();
Ok(Self::new(tokenizer?))
}
/// Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
///
/// Args:
/// buffer (:obj:`bytes`):
/// A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
///
/// Returns:
/// :class:`~tokenizers.Tokenizer`: The new tokenizer
#[staticmethod]
#[pyo3(text_signature = "(buffer)")]
fn from_buffer(buffer: &Bound<'_, PyBytes>) -> PyResult<Self> {
let tokenizer = serde_json::from_slice(buffer.as_bytes()).map_err(|e| {
exceptions::PyValueError::new_err(format!(
"Cannot instantiate Tokenizer from buffer: {}",
e
))
})?;
Ok(Self { tokenizer })
}
/// Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
/// Hugging Face Hub.
///
/// Args:
/// identifier (:obj:`str`):
/// The identifier of a Model on the Hugging Face Hub, that contains
/// a tokenizer.json file
/// revision (:obj:`str`, defaults to `main`):
/// A branch or commit id
/// token (:obj:`str`, `optional`, defaults to `None`):
/// An optional auth token used to access private repositories on the
/// Hugging Face Hub
///
/// Returns:
/// :class:`~tokenizers.Tokenizer`: The new tokenizer
#[staticmethod]
#[pyo3(signature = (identifier, revision = String::from("main"), token = None))]
#[pyo3(text_signature = "(identifier, revision=\"main\", token=None)")]
fn from_pretrained(
identifier: &str,
revision: String,
token: Option<String>,
) -> PyResult<Self> {
let path = Python::with_gil(|py| -> PyResult<String> {
let huggingface_hub = PyModule::import(py, intern!(py, "huggingface_hub"))?;
let hf_hub_download = huggingface_hub.getattr(intern!(py, "hf_hub_download"))?;
let kwargs = [
(intern!(py, "repo_id"), identifier),
(intern!(py, "filename"), "tokenizer.json"),
(intern!(py, "revision"), &revision),
]
.into_py_dict(py)?;
if let Some(token) = token {
kwargs.set_item(intern!(py, "token"), token)?;
}
let path: String = hf_hub_download.call((), Some(&kwargs))?.extract()?;
Ok(path)
})?;
let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into();
Ok(Self::new(tokenizer?))
}
/// Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
///
/// Args:
/// pretty (:obj:`bool`, defaults to :obj:`False`):
/// Whether the JSON string should be pretty formatted.
///
/// Returns:
/// :obj:`str`: A string representing the serialized Tokenizer
#[pyo3(signature = (pretty = false))]
#[pyo3(text_signature = "(self, pretty=False)")]
fn to_str(&self, pretty: bool) -> PyResult<String> {
ToPyResult(self.tokenizer.to_string(pretty)).into()
}
/// Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
///
/// Args:
/// path (:obj:`str`):
/// A path to a file in which to save the serialized tokenizer.
///
/// pretty (:obj:`bool`, defaults to :obj:`True`):
/// Whether the JSON file should be pretty formatted.
#[pyo3(signature = (path, pretty = true))]
#[pyo3(text_signature = "(self, path, pretty=True)")]
fn save(&self, path: &str, pretty: bool) -> PyResult<()> {
ToPyResult(self.tokenizer.save(path, pretty)).into()
}
fn __repr__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::repr(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
fn __str__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::to_string(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
    /// Return the number of special tokens that would be added for single/pair sentences.
    ///
    /// Args:
    ///     is_pair (:obj:`bool`):
    ///         Whether the input would be a pair of sequences
    ///
    /// Returns:
    ///     :obj:`int`: The number of special tokens that would be added
#[pyo3(text_signature = "(self, is_pair)")]
fn num_special_tokens_to_add(&self, is_pair: bool) -> usize {
self.tokenizer
.get_post_processor()
.map_or(0, |p| p.added_tokens(is_pair))
}
/// Get the underlying vocabulary
///
/// Args:
/// with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
/// Whether to include the added tokens
///
/// Returns:
/// :obj:`Dict[str, int]`: The vocabulary
#[pyo3(signature = (with_added_tokens = true))]
#[pyo3(text_signature = "(self, with_added_tokens=True)")]
fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> {
self.tokenizer.get_vocab(with_added_tokens)
}
/// Get the underlying vocabulary
///
/// Returns:
/// :obj:`Dict[int, AddedToken]`: The vocabulary
#[pyo3(signature = ())]
#[pyo3(text_signature = "(self)")]
fn get_added_tokens_decoder(&self) -> BTreeMap<u32, PyAddedToken> {
let mut sorted_map = BTreeMap::new();
for (key, value) in self.tokenizer.get_added_tokens_decoder() {
sorted_map.insert(key, value.into());
}
sorted_map
}
/// Get the size of the underlying vocabulary
///
/// Args:
/// with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
/// Whether to include the added tokens
///
/// Returns:
/// :obj:`int`: The size of the vocabulary
#[pyo3(signature = (with_added_tokens = true))]
#[pyo3(text_signature = "(self, with_added_tokens=True)")]
fn get_vocab_size(&self, with_added_tokens: bool) -> usize {
self.tokenizer.get_vocab_size(with_added_tokens)
}
/// Enable truncation
///
/// Args:
/// max_length (:obj:`int`):
/// The max length at which to truncate
///
/// stride (:obj:`int`, `optional`):
/// The length of the previous first sequence to be included in the overflowing
/// sequence
///
/// strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
/// The strategy to use for truncation. Can be one of ``longest_first``, ``only_first`` or
/// ``only_second``.
///
/// direction (:obj:`str`, defaults to :obj:`right`):
/// Truncate direction
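    ///
    /// Example:
    ///     A minimal sketch, assuming ``tokenizer`` is a :class:`~tokenizers.Tokenizer`
    ///     (the lengths below are only illustrative)::
    ///
    ///         tokenizer.enable_truncation(max_length=512, stride=32, strategy="longest_first")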
#[pyo3(signature = (max_length, **kwargs))]
#[pyo3(
text_signature = "(self, max_length, stride=0, strategy='longest_first', direction='right')"
)]
fn enable_truncation(
&mut self,
max_length: usize,
kwargs: Option<&Bound<'_, PyDict>>,
) -> PyResult<()> {
let mut params = TruncationParams {
max_length,
..Default::default()
};
if let Some(kwargs) = kwargs {
for (key, value) in kwargs {
let key: String = key.extract()?;
match key.as_ref() {
"stride" => params.stride = value.extract()?,
"strategy" => {
let value: String = value.extract()?;
params.strategy = match value.as_ref() {
"longest_first" => Ok(TruncationStrategy::LongestFirst),
"only_first" => Ok(TruncationStrategy::OnlyFirst),
"only_second" => Ok(TruncationStrategy::OnlySecond),
_ => Err(PyError(format!(
"Unknown `strategy`: `{}`. Use \
one of `longest_first`, `only_first`, or `only_second`",
value
))
.into_pyerr::<exceptions::PyValueError>()),
}?
}
"direction" => {
let value: String = value.extract()?;
params.direction = match value.as_ref() {
"left" => Ok(TruncationDirection::Left),
"right" => Ok(TruncationDirection::Right),
_ => Err(PyError(format!(
"Unknown `direction`: `{}`. Use \
one of `left` or `right`.",
value
))
.into_pyerr::<exceptions::PyValueError>()),
}?
}
_ => println!("Ignored unknown kwarg option {}", key),
}
}
}
if let Err(error_message) = self.tokenizer.with_truncation(Some(params)) {
return Err(PyError(error_message.to_string()).into_pyerr::<exceptions::PyValueError>());
}
Ok(())
}
/// Disable truncation
#[pyo3(text_signature = "(self)")]
fn no_truncation(&mut self) {
self.tokenizer
.with_truncation(None)
.expect("Failed to set truncation to `None`! This should never happen");
}
/// Get the currently set truncation parameters
///
/// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
///
/// Returns:
/// (:obj:`dict`, `optional`):
/// A dict with the current truncation parameters if truncation is enabled
#[getter]
fn get_truncation<'py>(&self, py: Python<'py>) -> PyResult<Option<Bound<'py, PyDict>>> {
self.tokenizer.get_truncation().map_or(Ok(None), |params| {
let dict = PyDict::new(py);
dict.set_item("max_length", params.max_length)?;
dict.set_item("stride", params.stride)?;
dict.set_item("strategy", params.strategy.as_ref())?;
dict.set_item("direction", params.direction.as_ref())?;
Ok(Some(dict))
})
}
/// Enable the padding
///
/// Args:
/// direction (:obj:`str`, `optional`, defaults to :obj:`right`):
/// The direction in which to pad. Can be either ``right`` or ``left``
///
/// pad_to_multiple_of (:obj:`int`, `optional`):
/// If specified, the padding length should always snap to the next multiple of the
/// given value. For example, if we were going to pad with a length of 250 but
/// ``pad_to_multiple_of=8`` then we will pad to 256.
///
/// pad_id (:obj:`int`, defaults to 0):
/// The id to be used when padding
///
/// pad_type_id (:obj:`int`, defaults to 0):
/// The type id to be used when padding
///
/// pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
/// The pad token to be used when padding
///
/// length (:obj:`int`, `optional`):
/// If specified, the length at which to pad. If not specified we pad using the size of
/// the longest sequence in a batch.
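    ///
    /// Example:
    ///     A minimal sketch, assuming ``tokenizer`` is a :class:`~tokenizers.Tokenizer`
    ///     (the pad id and token below should match your vocabulary)::
    ///
    ///         tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", length=128)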
#[pyo3(signature = (**kwargs))]
#[pyo3(
text_signature = "(self, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]', length=None, pad_to_multiple_of=None)"
)]
fn enable_padding(&mut self, kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<()> {
let mut params = PaddingParams::default();
if let Some(kwargs) = kwargs {
for (key, value) in kwargs {
let key: String = key.extract()?;
match key.as_ref() {
"direction" => {
let value: String = value.extract()?;
params.direction = match value.as_ref() {
"left" => Ok(PaddingDirection::Left),
"right" => Ok(PaddingDirection::Right),
other => Err(PyError(format!(
"Unknown `direction`: `{}`. Use \
one of `left` or `right`",
other
))
.into_pyerr::<exceptions::PyValueError>()),
}?;
}
"pad_to_multiple_of" => {
if let Some(multiple) = value.extract()? {
params.pad_to_multiple_of = multiple;
}
}
"pad_id" => params.pad_id = value.extract()?,
"pad_type_id" => params.pad_type_id = value.extract()?,
"pad_token" => params.pad_token = value.extract()?,
"max_length" => {
println!(
"enable_padding(max_length=X) is deprecated, \
use enable_padding(length=X) instead"
);
if let Some(l) = value.extract()? {
params.strategy = PaddingStrategy::Fixed(l);
} else {
params.strategy = PaddingStrategy::BatchLongest;
}
}
"length" => {
if let Some(l) = value.extract()? {
params.strategy = PaddingStrategy::Fixed(l);
} else {
params.strategy = PaddingStrategy::BatchLongest;
}
}
_ => println!("Ignored unknown kwarg option {}", key),
}
}
}
self.tokenizer.with_padding(Some(params));
Ok(())
}
/// Disable padding
#[pyo3(text_signature = "(self)")]
fn no_padding(&mut self) {
self.tokenizer.with_padding(None);
}
/// Get the current padding parameters
///
/// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
///
/// Returns:
/// (:obj:`dict`, `optional`):
/// A dict with the current padding parameters if padding is enabled
#[getter]
fn get_padding<'py>(&self, py: Python<'py>) -> PyResult<Option<Bound<'py, PyDict>>> {
self.tokenizer.get_padding().map_or(Ok(None), |params| {
let dict = PyDict::new(py);
dict.set_item(
"length",
match params.strategy {
tk::PaddingStrategy::BatchLongest => None,
tk::PaddingStrategy::Fixed(size) => Some(size),
},
)?;
dict.set_item("pad_to_multiple_of", params.pad_to_multiple_of)?;
dict.set_item("pad_id", params.pad_id)?;
dict.set_item("pad_token", ¶ms.pad_token)?;
dict.set_item("pad_type_id", params.pad_type_id)?;
dict.set_item("direction", params.direction.as_ref())?;
Ok(Some(dict))
})
}
/// Encode the given sequence and pair. This method can process raw text sequences
/// as well as already pre-tokenized sequences.
///
/// Example:
/// Here are some examples of the inputs that are accepted::
///
/// encode("A single sequence")`
/// encode("A sequence", "And its pair")`
/// encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
/// encode(
/// [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
/// is_pretokenized=True
/// )
///
/// Args:
/// sequence (:obj:`~tokenizers.InputSequence`):
/// The main input sequence we want to encode. This sequence can be either raw
/// text or pre-tokenized, according to the ``is_pretokenized`` argument:
///
/// - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
/// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
///
/// pair (:obj:`~tokenizers.InputSequence`, `optional`):
/// An optional input sequence. The expected format is the same that for ``sequence``.
///
/// is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
/// Whether the input is already pre-tokenized
///
/// add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
/// Whether to add the special tokens
///
/// Returns:
/// :class:`~tokenizers.Encoding`: The encoded result
///
#[pyo3(signature = (sequence, pair = None, is_pretokenized = false, add_special_tokens = true))]
#[pyo3(
text_signature = "(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True)"
)]
fn encode(
&self,
sequence: &Bound<'_, PyAny>,
pair: Option<&Bound<'_, PyAny>>,
is_pretokenized: bool,
add_special_tokens: bool,
) -> PyResult<PyEncoding> {
let sequence: tk::InputSequence = if is_pretokenized {
sequence.extract::<PreTokenizedInputSequence>()?.into()
} else {
sequence.extract::<TextInputSequence>()?.into()
};
let input = match pair {
Some(pair) => {
let pair: tk::InputSequence = if is_pretokenized {
pair.extract::<PreTokenizedInputSequence>()?.into()
} else {
pair.extract::<TextInputSequence>()?.into()
};
tk::EncodeInput::Dual(sequence, pair)
}
None => tk::EncodeInput::Single(sequence),
};
ToPyResult(
self.tokenizer
.encode_char_offsets(input, add_special_tokens)
.map(|e| e.into()),
)
.into()
}
    /// Encode the given batch of inputs. This method accepts both raw text sequences
    /// and already pre-tokenized sequences. The reason we use `PySequence` is
    /// that it allows zero-cost type checking (according to PyO3), since we don't
    /// have to convert the input just to check its type.
///
/// Example:
/// Here are some examples of the inputs that are accepted::
///
/// encode_batch([
/// "A single sequence",
/// ("A tuple with a sequence", "And its pair"),
/// [ "A", "pre", "tokenized", "sequence" ],
/// ([ "A", "pre", "tokenized", "sequence" ], "And its pair")
/// ])
///
/// Args:
/// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
/// A list of single sequences or pair sequences to encode. Each sequence
/// can be either raw text or pre-tokenized, according to the ``is_pretokenized``
/// argument:
///
/// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
/// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
///
/// is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
/// Whether the input is already pre-tokenized
///
/// add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
/// Whether to add the special tokens
///
/// Returns:
/// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
///
#[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))]
#[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")]
fn encode_batch(
&self,
py: Python<'_>,
input: Vec<Bound<'_, PyAny>>,
is_pretokenized: bool,
add_special_tokens: bool,
) -> PyResult<Vec<PyEncoding>> {
let mut items = Vec::<tk::EncodeInput>::with_capacity(input.len());
for item in &input {
let item: tk::EncodeInput = if is_pretokenized {
item.extract::<PreTokenizedEncodeInput>()?.into()
} else {
item.extract::<TextEncodeInput>()?.into()
};
items.push(item);
}
py.allow_threads(|| {
ToPyResult(
self.tokenizer
.encode_batch_char_offsets(items, add_special_tokens)
.map(|encodings| encodings.into_iter().map(|e| e.into()).collect()),
)
.into()
})
}
/// Encode the given batch of inputs. This method is faster than `encode_batch`
/// because it doesn't keep track of offsets; they will all be zero.
///
/// Example:
/// Here are some examples of the inputs that are accepted::
///
/// encode_batch_fast([
/// "A single sequence",
/// ("A tuple with a sequence", "And its pair"),
/// [ "A", "pre", "tokenized", "sequence" ],
/// ([ "A", "pre", "tokenized", "sequence" ], "And its pair")
/// ])
///
/// Args:
/// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
/// A list of single sequences or pair sequences to encode. Each sequence
/// can be either raw text or pre-tokenized, according to the ``is_pretokenized``
/// argument:
///
/// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
/// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
///
/// is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
/// Whether the input is already pre-tokenized
///
/// add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
/// Whether to add the special tokens
///
/// Returns:
/// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
///
#[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))]
#[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")]
fn encode_batch_fast(
&self,
py: Python<'_>,
input: Vec<Bound<'_, PyAny>>,
is_pretokenized: bool,
add_special_tokens: bool,
) -> PyResult<Vec<PyEncoding>> {
let mut items = Vec::<tk::EncodeInput>::with_capacity(input.len());
for item in &input {
let item: tk::EncodeInput = if is_pretokenized {
item.extract::<PreTokenizedEncodeInput>()?.into()
} else {
item.extract::<TextEncodeInput>()?.into()
};
items.push(item);
}
py.allow_threads(|| {
ToPyResult(
self.tokenizer
.encode_batch_fast(items, add_special_tokens)
.map(|encodings| encodings.into_iter().map(|e| e.into()).collect()),
)
.into()
})
}
/// Decode the given list of ids back to a string
///
/// This is used to decode anything coming back from a Language Model
///
/// Args:
/// ids (A :obj:`List/Tuple` of :obj:`int`):
/// The list of ids that we want to decode
///
/// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
/// Whether the special tokens should be removed from the decoded string
///
/// Returns:
/// :obj:`str`: The decoded string
#[pyo3(signature = (ids, skip_special_tokens = true))]
#[pyo3(text_signature = "(self, ids, skip_special_tokens=True)")]
fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> PyResult<String> {
ToPyResult(self.tokenizer.decode(&ids, skip_special_tokens)).into()
}
/// Decode a batch of ids back to their corresponding string
///
/// Args:
/// sequences (:obj:`List` of :obj:`List[int]`):
/// The batch of sequences we want to decode
///
/// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
/// Whether the special tokens should be removed from the decoded strings
///
/// Returns:
/// :obj:`List[str]`: A list of decoded strings
#[pyo3(signature = (sequences, skip_special_tokens = true))]
#[pyo3(text_signature = "(self, sequences, skip_special_tokens=True)")]
fn decode_batch(
&self,
py: Python<'_>,
sequences: Vec<Vec<u32>>,
skip_special_tokens: bool,
) -> PyResult<Vec<String>> {
py.allow_threads(|| {
let slices = sequences.iter().map(|v| &v[..]).collect::<Vec<&[u32]>>();
ToPyResult(self.tokenizer.decode_batch(&slices, skip_special_tokens)).into()
})
}
/// Convert the given token to its corresponding id if it exists
///
/// Args:
/// token (:obj:`str`):
/// The token to convert
///
/// Returns:
/// :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
#[pyo3(text_signature = "(self, token)")]
fn token_to_id(&self, token: &str) -> Option<u32> {
self.tokenizer.token_to_id(token)
}
/// Convert the given id to its corresponding token if it exists
///
/// Args:
/// id (:obj:`int`):
/// The id to convert
///
/// Returns:
/// :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
#[pyo3(text_signature = "(self, id)")]
fn id_to_token(&self, id: u32) -> Option<String> {
self.tokenizer.id_to_token(id)
}
/// Modifies whether the tokenizer uses the special tokens
/// during encoding.
///
/// Args:
/// value (:obj:`bool`):
/// Whether to use the special tokens or not
///
#[setter]
fn set_encode_special_tokens(&mut self, value: bool) {
self.tokenizer.set_encode_special_tokens(value);
}
/// Get the value of the `encode_special_tokens` attribute
///
/// Returns:
/// :obj:`bool`: the tokenizer's encode_special_tokens attribute
#[getter]
fn get_encode_special_tokens(&self) -> bool {
self.tokenizer.get_encode_special_tokens()
}
/// Add the given tokens to the vocabulary
///
/// The given tokens are added only if they don't already exist in the vocabulary.
/// Each added token is then assigned a new id.
///
/// Args:
/// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
/// The list of tokens we want to add to the vocabulary. Each token can be either a
/// string or an instance of :class:`~tokenizers.AddedToken` for more customization.
///
/// Returns:
/// :obj:`int`: The number of tokens that were created in the vocabulary
#[pyo3(text_signature = "(self, tokens)")]
fn add_tokens(&mut self, tokens: &Bound<'_, PyList>) -> PyResult<usize> {
let tokens = tokens
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(PyAddedToken::from(content, Some(false)).get_token())
} else if let Ok(token) = token.extract::<PyRefMut<PyAddedToken>>() {
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"Input must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?;
Ok(self.tokenizer.add_tokens(&tokens))
}
/// Add the given special tokens to the Tokenizer.
///
/// If these tokens are already part of the vocabulary, it just lets the Tokenizer know about
/// them. If they don't exist, the Tokenizer creates them, giving them a new id.
///
/// These special tokens will never be processed by the model (i.e. they won't be split into
/// multiple tokens), and they can be removed from the output when decoding.
///
/// Args:
/// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
/// The list of special tokens we want to add to the vocabulary. Each token can either
/// be a string or an instance of :class:`~tokenizers.AddedToken` for more
/// customization.
///
/// Returns:
/// :obj:`int`: The number of tokens that were created in the vocabulary
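    ///
    /// Example:
    ///     A minimal sketch (the token contents below are only illustrative)::
    ///
    ///         from tokenizers import AddedToken
    ///         tokenizer.add_special_tokens(["[CLS]", "[SEP]", AddedToken("[MASK]", lstrip=True)])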
#[pyo3(text_signature = "(self, tokens)")]
fn add_special_tokens(&mut self, tokens: &Bound<'_, PyList>) -> PyResult<usize> {
let tokens = tokens
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(tk::tokenizer::AddedToken::from(content, true))
} else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"Input must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?;
Ok(self.tokenizer.add_special_tokens(&tokens))
}
/// Train the Tokenizer using the given files.
///
/// Reads the files line by line, while keeping all the whitespace, even new lines.
/// If you want to train from data stored in memory, you can check
/// :meth:`~tokenizers.Tokenizer.train_from_iterator`
///
/// Args:
/// files (:obj:`List[str]`):
/// A list of path to the files that we should use for training
///
/// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
/// An optional trainer that should be used to train our Model
#[pyo3(signature = (files, trainer = None))]
#[pyo3(text_signature = "(self, files, trainer = None)")]
fn train(&mut self, files: Vec<String>, trainer: Option<&mut PyTrainer>) -> PyResult<()> {
let mut trainer =
trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone());
Python::with_gil(|py| {
py.allow_threads(|| {
ToPyResult(
self.tokenizer
.train_from_files(&mut trainer, files)
.map(|_| {}),
)
.into()
})
})
}
/// Train the Tokenizer using the provided iterator.
///
/// You can provide anything that is a Python Iterator
///
/// * A list of sequences :obj:`List[str]`
/// * A generator that yields :obj:`str` or :obj:`List[str]`
/// * A Numpy array of strings
/// * ...
///
/// Args:
/// iterator (:obj:`Iterator`):
/// Any iterator over strings or list of strings
///
/// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
/// An optional trainer that should be used to train our Model
///
/// length (:obj:`int`, `optional`):
/// The total number of sequences in the iterator. This is used to
/// provide meaningful progress tracking
#[pyo3(signature = (iterator, trainer = None, length = None))]
#[pyo3(text_signature = "(self, iterator, trainer=None, length=None)")]
fn train_from_iterator(
&mut self,
py: Python,
iterator: &Bound<'_, PyAny>,
trainer: Option<&mut PyTrainer>,
length: Option<usize>,
) -> PyResult<()> {
let mut trainer =
trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone());
let buffered_iter = PyBufferedIterator::new(
iterator,
|element| {
// Each element of the iterator can either be:
// - An iterator, to allow batching
// - A string
if let Ok(s) = element.downcast::<PyString>() {
itertools::Either::Right(std::iter::once(s.to_cow().map(|s| s.into_owned())))
} else {
match element.try_iter() {
Ok(iter) => itertools::Either::Left(
iter.map(|i| i?.extract::<String>())
.collect::<Vec<_>>()
.into_iter(),
),
Err(e) => itertools::Either::Right(std::iter::once(Err(e))),
}
}
},
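            // Number of elements buffered from the Python iterator at a time before
            // handing them to the Rust trainer.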
256,
)?;
py.allow_threads(|| {
ResultShunt::process(buffered_iter, |iter| {
self.tokenizer
.train(&mut trainer, MaybeSizedIterator::new(iter, length))
.map(|_| {})
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
})?
})
}
/// Apply all the post-processing steps to the given encodings.
///
/// The various steps are:
///
/// 1. Truncate according to the set truncation params (provided with
/// :meth:`~tokenizers.Tokenizer.enable_truncation`)
/// 2. Apply the :class:`~tokenizers.processors.PostProcessor`
/// 3. Pad according to the set padding params (provided with
/// :meth:`~tokenizers.Tokenizer.enable_padding`)
///
/// Args:
/// encoding (:class:`~tokenizers.Encoding`):
/// The :class:`~tokenizers.Encoding` corresponding to the main sequence.
///
/// pair (:class:`~tokenizers.Encoding`, `optional`):
/// An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
///
/// add_special_tokens (:obj:`bool`):
/// Whether to add the special tokens
///
/// Returns:
/// :class:`~tokenizers.Encoding`: The final post-processed encoding
#[pyo3(signature = (encoding, pair = None, add_special_tokens = true))]
#[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")]
fn post_process(
&self,
encoding: &PyEncoding,
pair: Option<&PyEncoding>,
add_special_tokens: bool,
) -> PyResult<PyEncoding> {
ToPyResult(
self.tokenizer
.post_process(
encoding.encoding.clone(),
pair.map(|p| p.encoding.clone()),
add_special_tokens,
)
.map(|e| e.into()),
)
.into()
}
/// The :class:`~tokenizers.models.Model` in use by the Tokenizer
#[getter]
fn get_model(&self, py: Python<'_>) -> PyResult<PyObject> {
self.tokenizer.get_model().get_as_subtype(py)
}
/// Set the :class:`~tokenizers.models.Model`
#[setter]
fn set_model(&mut self, model: PyRef<PyModel>) {
self.tokenizer.with_model(model.clone());
}
/// The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
#[getter]
fn get_normalizer(&self, py: Python<'_>) -> PyResult<PyObject> {
if let Some(n) = self.tokenizer.get_normalizer() {
n.get_as_subtype(py)
} else {
Ok(py.None())
}
}
/// Set the :class:`~tokenizers.normalizers.Normalizer`
#[setter]
fn set_normalizer(&mut self, normalizer: Option<PyRef<PyNormalizer>>) {
let normalizer_option = normalizer.map(|norm| norm.clone());
self.tokenizer.with_normalizer(normalizer_option);
}
/// The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
#[getter]
fn get_pre_tokenizer(&self, py: Python<'_>) -> PyResult<PyObject> {
if let Some(pt) = self.tokenizer.get_pre_tokenizer() {
pt.get_as_subtype(py)
} else {
Ok(py.None())
}
}
    /// Set the :class:`~tokenizers.pre_tokenizers.PreTokenizer`
#[setter]
fn set_pre_tokenizer(&mut self, pretok: Option<PyRef<PyPreTokenizer>>) {
self.tokenizer
.with_pre_tokenizer(pretok.map(|pre| pre.clone()));
}
/// The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
#[getter]
fn get_post_processor(&self, py: Python<'_>) -> PyResult<PyObject> {
if let Some(n) = self.tokenizer.get_post_processor() {
n.get_as_subtype(py)
} else {
Ok(py.None())
}
}
/// Set the :class:`~tokenizers.processors.PostProcessor`
#[setter]
fn set_post_processor(&mut self, processor: Option<PyRef<PyPostProcessor>>) {
self.tokenizer
.with_post_processor(processor.map(|p| p.clone()));
}
/// The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
#[getter]
fn get_decoder(&self, py: Python<'_>) -> PyResult<PyObject> {
if let Some(dec) = self.tokenizer.get_decoder() {
dec.get_as_subtype(py)
} else {
Ok(py.None())
}
}
/// Set the :class:`~tokenizers.decoders.Decoder`
#[setter]
fn set_decoder(&mut self, decoder: Option<PyRef<PyDecoder>>) {
self.tokenizer.with_decoder(decoder.map(|d| d.clone()));
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::models::PyModel;
use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper};
use std::sync::{Arc, RwLock};
use tempfile::NamedTempFile;
use tk::normalizers::{Lowercase, NFKC};
#[test]
fn serialize() {
let mut tokenizer = Tokenizer::new(PyModel::from(BPE::default()));
tokenizer.with_normalizer(Some(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(
vec![
Arc::new(RwLock::new(NFKC.into())),
Arc::new(RwLock::new(Lowercase.into())),
],
))));
let tmp = NamedTempFile::new().unwrap().into_temp_path();
tokenizer.save(&tmp, false).unwrap();
Tokenizer::from_file(&tmp).unwrap();
}
#[test]
fn serde_pyo3() {
let mut tokenizer = Tokenizer::new(PyModel::from(BPE::default()));
tokenizer.with_normalizer(Some(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(
vec![
Arc::new(RwLock::new(NFKC.into())),
Arc::new(RwLock::new(Lowercase.into())),
],
))));
let output = crate::utils::serde_pyo3::to_string(&tokenizer).unwrap();
assert_eq!(output, "Tokenizer(version=\"1.0\", truncation=None, padding=None, added_tokens=[], normalizer=Sequence(normalizers=[NFKC(), Lowercase()]), pre_tokenizer=None, post_processor=None, decoder=None, model=BPE(dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=False, byte_fallback=False, ignore_merges=False, vocab={}, merges=[]))");
}
}
| tokenizers/bindings/python/src/tokenizer.rs/0 | {
"file_path": "tokenizers/bindings/python/src/tokenizer.rs",
"repo_id": "tokenizers",
"token_count": 28074
} |
import json
import pickle
import pytest
from tokenizers.pre_tokenizers import (
BertPreTokenizer,
ByteLevel,
CharDelimiterSplit,
Digits,
Metaspace,
PreTokenizer,
Punctuation,
Sequence,
Split,
UnicodeScripts,
Whitespace,
WhitespaceSplit,
)
class TestByteLevel:
def test_instantiate(self):
assert ByteLevel() is not None
assert ByteLevel(add_prefix_space=True) is not None
assert ByteLevel(add_prefix_space=False) is not None
assert isinstance(ByteLevel(), PreTokenizer)
assert isinstance(ByteLevel(), ByteLevel)
assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)
def test_has_alphabet(self):
assert isinstance(ByteLevel.alphabet(), list)
assert len(ByteLevel.alphabet()) == 256
def test_can_modify(self):
pretok = ByteLevel(add_prefix_space=False)
assert pretok.add_prefix_space == False
# Modify these
pretok.add_prefix_space = True
assert pretok.add_prefix_space == True
def test_manual_reload(self):
byte_level = ByteLevel()
state = json.loads(byte_level.__getstate__())
reloaded = ByteLevel(**state)
assert isinstance(reloaded, ByteLevel)
class TestSplit:
def test_instantiate(self):
pre_tokenizer = Split(pattern=" ", behavior="removed")
assert pre_tokenizer is not None
assert isinstance(pre_tokenizer, PreTokenizer)
assert isinstance(pre_tokenizer, Split)
assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed"))), Split)
# test with invert=True
pre_tokenizer_with_invert = Split(pattern=" ", behavior="isolated", invert=True)
assert pre_tokenizer_with_invert is not None
assert isinstance(pre_tokenizer_with_invert, PreTokenizer)
assert isinstance(pre_tokenizer_with_invert, Split)
assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed", True))), Split)
class TestWhitespace:
def test_instantiate(self):
assert Whitespace() is not None
assert isinstance(Whitespace(), PreTokenizer)
assert isinstance(Whitespace(), Whitespace)
assert isinstance(pickle.loads(pickle.dumps(Whitespace())), Whitespace)
class TestWhitespaceSplit:
def test_instantiate(self):
assert WhitespaceSplit() is not None
assert isinstance(WhitespaceSplit(), PreTokenizer)
assert isinstance(WhitespaceSplit(), WhitespaceSplit)
assert isinstance(pickle.loads(pickle.dumps(WhitespaceSplit())), WhitespaceSplit)
class TestBertPreTokenizer:
def test_instantiate(self):
assert BertPreTokenizer() is not None
assert isinstance(BertPreTokenizer(), PreTokenizer)
assert isinstance(BertPreTokenizer(), BertPreTokenizer)
assert isinstance(pickle.loads(pickle.dumps(BertPreTokenizer())), BertPreTokenizer)
class TestMetaspace:
def test_instantiate(self):
assert Metaspace() is not None
assert Metaspace(replacement="-") is not None
with pytest.raises(ValueError, match="expected a string of length 1"):
Metaspace(replacement="")
assert Metaspace(prepend_scheme="always") is not None
assert isinstance(Metaspace(), PreTokenizer)
assert isinstance(Metaspace(), Metaspace)
assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace)
def test_can_modify(self):
pretok = Metaspace(replacement="$", prepend_scheme="never")
assert pretok.replacement == "$"
assert pretok.prepend_scheme == "never"
assert pretok.split == True
# Modify these
pretok.replacement = "%"
assert pretok.replacement == "%"
pretok.prepend_scheme = "first"
assert pretok.prepend_scheme == "first"
pretok.split = True
assert pretok.split == True
class TestCharDelimiterSplit:
def test_instantiate(self):
assert CharDelimiterSplit("-") is not None
with pytest.raises(ValueError, match="expected a string of length 1"):
CharDelimiterSplit("")
assert isinstance(CharDelimiterSplit(" "), PreTokenizer)
assert isinstance(CharDelimiterSplit(" "), CharDelimiterSplit)
assert isinstance(pickle.loads(pickle.dumps(CharDelimiterSplit("-"))), CharDelimiterSplit)
def test_can_modify(self):
pretok = CharDelimiterSplit("@")
assert pretok.delimiter == "@"
# Modify these
pretok.delimiter = "!"
assert pretok.delimiter == "!"
class TestPunctuation:
def test_instantiate(self):
assert Punctuation() is not None
assert Punctuation("removed") is not None
assert isinstance(Punctuation(), PreTokenizer)
assert isinstance(Punctuation(), Punctuation)
assert isinstance(pickle.loads(pickle.dumps(Punctuation())), Punctuation)
class TestSequence:
def test_instantiate(self):
assert Sequence([]) is not None
assert isinstance(Sequence([]), PreTokenizer)
assert isinstance(Sequence([]), Sequence)
dumped = pickle.dumps(Sequence([]))
assert isinstance(pickle.loads(dumped), Sequence)
def test_bert_like(self):
pre_tokenizer = Sequence([WhitespaceSplit(), Punctuation()])
assert isinstance(Sequence([]), PreTokenizer)
assert isinstance(Sequence([]), Sequence)
assert isinstance(pickle.loads(pickle.dumps(pre_tokenizer)), Sequence)
result = pre_tokenizer.pre_tokenize_str("Hey friend! How are you?!?")
assert result == [
("Hey", (0, 3)),
("friend", (4, 10)),
("!", (10, 11)),
("How", (16, 19)),
("are", (20, 23)),
("you", (24, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
def test_set_item(self):
pre_tokenizers = Sequence(
[
ByteLevel(),
Split(pattern="/test/", behavior="removed"),
]
)
assert pre_tokenizers[0].__class__ == ByteLevel
assert pre_tokenizers[1].__class__ == Split
pre_tokenizers[1] = Metaspace()
assert pre_tokenizers[1].__class__ == Metaspace
with pytest.raises(IndexError):
print(pre_tokenizers[2])
def test_item_getters_and_setters(self):
pre_tokenizers = Sequence(
[
ByteLevel(add_prefix_space=True, trim_offsets=True, use_regex=True),
Split(pattern="/test/", behavior="removed", invert=False),
Metaspace("a", "never", split=False),
CharDelimiterSplit(delimiter=" "),
Punctuation(behavior="removed"),
Digits(individual_digits=True),
]
)
assert pre_tokenizers[0].__class__ == ByteLevel
pre_tokenizers[0].add_prefix_space = False
pre_tokenizers[0].trim_offsets = False
pre_tokenizers[0].use_regex = False
assert not pre_tokenizers[0].add_prefix_space
assert not pre_tokenizers[0].trim_offsets
assert not pre_tokenizers[0].use_regex
assert pre_tokenizers[1].__class__ == Split
with pytest.raises(Exception):
pre_tokenizers[1].pattern = "/pattern/"
pre_tokenizers[1].behavior = "isolated"
pre_tokenizers[1].invert = True
with pytest.raises(Exception):
pre_tokenizers[1].pattern
assert pre_tokenizers[1].behavior == "isolated"
assert pre_tokenizers[1].invert
assert pre_tokenizers[2].__class__ == Metaspace
pre_tokenizers[2].replacement = " "
pre_tokenizers[2].prepend_scheme = "always"
pre_tokenizers[2].split = True
assert pre_tokenizers[2].replacement == " "
assert pre_tokenizers[2].prepend_scheme == "always"
assert pre_tokenizers[2].split
assert pre_tokenizers[3].__class__ == CharDelimiterSplit
pre_tokenizers[3].delimiter = "_"
assert pre_tokenizers[3].delimiter == "_"
assert pre_tokenizers[4].__class__ == Punctuation
pre_tokenizers[4].behavior = "isolated"
assert pre_tokenizers[4].behavior == "isolated"
assert pre_tokenizers[5].__class__ == Digits
pre_tokenizers[5].individual_digits = False
assert not pre_tokenizers[5].individual_digits
class TestDigits:
def test_instantiate(self):
assert Digits() is not None
assert isinstance(Digits(), PreTokenizer)
assert isinstance(Digits(), Digits)
assert isinstance(Digits(True), Digits)
assert isinstance(Digits(False), Digits)
assert isinstance(pickle.loads(pickle.dumps(Digits())), Digits)
def test_can_modify(self):
pretok = Digits(individual_digits=False)
assert pretok.individual_digits == False
# Modify these
pretok.individual_digits = True
assert pretok.individual_digits == True
class TestUnicodeScripts:
def test_instantiate(self):
assert UnicodeScripts() is not None
assert isinstance(UnicodeScripts(), PreTokenizer)
assert isinstance(UnicodeScripts(), UnicodeScripts)
assert isinstance(pickle.loads(pickle.dumps(UnicodeScripts())), UnicodeScripts)
class TestCustomPreTokenizer:
class BadCustomPretok:
def pre_tokenize(self, pretok, wrong):
            # This method does not have the right signature: it takes one argument too many
pass
class GoodCustomPretok:
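        # A working custom pre-tokenizer only needs a `pre_tokenize(self, pretok)` method;
        # here the actual splitting is delegated to `split` through `pretok.split`.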
def split(self, n, normalized):
            # Here we just test that we can return a List[NormalizedString]; it
            # does not really make sense to return the same one twice otherwise
return [normalized, normalized]
def pre_tokenize(self, pretok):
pretok.split(self.split)
def test_instantiate(self):
bad = PreTokenizer.custom(TestCustomPreTokenizer.BadCustomPretok())
good = PreTokenizer.custom(TestCustomPreTokenizer.GoodCustomPretok())
assert isinstance(bad, PreTokenizer)
assert isinstance(good, PreTokenizer)
with pytest.raises(Exception, match="TypeError:.*pre_tokenize()"):
bad.pre_tokenize_str("Hey there!")
assert good.pre_tokenize_str("Hey there!") == [
("Hey there!", (0, 10)),
("Hey there!", (0, 10)),
]
def test_camel_case(self):
class CamelCasePretok:
def get_state(self, c):
if c.islower():
return "lower"
elif c.isupper():
return "upper"
elif c.isdigit():
return "digit"
else:
return "rest"
def split(self, n, normalized):
i = 0
# states = {"any", "lower", "upper", "digit", "rest"}
state = "any"
pieces = []
for j, c in enumerate(normalized.normalized):
c_state = self.get_state(c)
if state == "any":
state = c_state
if state != "rest" and state == c_state:
pass
elif state == "upper" and c_state == "lower":
pass
else:
pieces.append(normalized[i:j])
i = j
state = c_state
pieces.append(normalized[i:])
return pieces
def pre_tokenize(self, pretok):
pretok.split(self.split)
camel = PreTokenizer.custom(CamelCasePretok())
assert camel.pre_tokenize_str("HeyThere!?-ThisIsLife") == [
("Hey", (0, 3)),
("There", (3, 8)),
("!", (8, 9)),
("?", (9, 10)),
("-", (10, 11)),
("This", (11, 15)),
("Is", (15, 17)),
("Life", (17, 21)),
]
| tokenizers/bindings/python/tests/bindings/test_pre_tokenizers.py/0 | {
"file_path": "tokenizers/bindings/python/tests/bindings/test_pre_tokenizers.py",
"repo_id": "tokenizers",
"token_count": 5390
} |
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for those with `?=`
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
BUILDDIR ?= build
SOURCEDIR = source
# Put it first so that "make" without argument is like "make html_all".
html_all:
@echo "Generating doc for Rust"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/rust" $(SPHINXOPTS) $(O) -t rust
@echo "Generating doc for Python"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/python" $(SPHINXOPTS) $(O) -t python
@echo "Generating doc for Node.js"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/node" $(SPHINXOPTS) $(O) -t node
.PHONY: html_all Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| tokenizers/docs/Makefile/0 | {
"file_path": "tokenizers/docs/Makefile",
"repo_id": "tokenizers",
"token_count": 393
} |
<!-- DISABLE-FRONTMATTER-SECTIONS -->
# Tokenizers
Fast State-of-the-art tokenizers, optimized for both research and
production
[🤗 Tokenizers](https://github.com/huggingface/tokenizers) provides an
implementation of today's most used tokenizers, with a focus on
performance and versatility. These tokenizers are also used in [🤗 Transformers](https://github.com/huggingface/transformers).
# Main features:
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get the part of the original sentence that corresponds to any token.
- Does all the pre-processing: Truncation, Padding, add the special tokens your model needs.
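As a quick illustration using the Python bindings (a minimal sketch; the model name below is just an example of a tokenizer hosted on the Hugging Face Hub):
```python
from tokenizers import Tokenizer
# Load a serialized tokenizer from the Hugging Face Hub.
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
# Encode a sentence: the resulting Encoding exposes tokens, ids and offsets.
encoding = tokenizer.encode("Hello, y'all! How are you?")
print(encoding.tokens, encoding.ids)
```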
| tokenizers/docs/source-doc-builder/index.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/index.mdx",
"repo_id": "tokenizers",
"token_count": 250
} |
Input sequences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
Globally, any sequence can be either a string or a list of strings, according to the operating
mode of the tokenizer: ``raw text`` vs ``pre-tokenized``.
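For instance (an illustrative sketch, assuming ``tokenizer`` is an already loaded :class:`~tokenizers.Tokenizer`)::
    tokenizer.encode("Hello world")                              # raw text
    tokenizer.encode(["Hello", "world"], is_pretokenized=True)   # pre-tokenized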
.. autodata:: tokenizers.TextInputSequence
.. autodata:: tokenizers.PreTokenizedInputSequence
.. autodata:: tokenizers.InputSequence
Encode inputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of input that a :class:`~tokenizers.Tokenizer` accepts
when using :meth:`~tokenizers.Tokenizer.encode_batch`.
.. autodata:: tokenizers.TextEncodeInput
.. autodata:: tokenizers.PreTokenizedEncodeInput
.. autodata:: tokenizers.EncodeInput
Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Tokenizer
:members:
Encoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Encoding
:members:
Added Tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.AddedToken
:members:
Models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.models
:members:
Normalizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.normalizers
:members:
Pre-tokenizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.pre_tokenizers
:members:
Post-processor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.processors
:members:
Trainers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.trainers
:members:
Decoders
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.decoders
:members:
Visualizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.tools.Annotation
:members:
.. autoclass:: tokenizers.tools.EncodingVisualizer
:members: __call__
| tokenizers/docs/source/api/python.inc/0 | {
"file_path": "tokenizers/docs/source/api/python.inc",
"repo_id": "tokenizers",
"token_count": 562
} |
pub fn set_panic_hook() {
// When the `console_error_panic_hook` feature is enabled, we can call the
// `set_panic_hook` function at least once during initialization, and then
// we will get better error messages if our code ever panics.
//
// For more details see
// https://github.com/rustwasm/console_error_panic_hook#readme
#[cfg(feature = "console_error_panic_hook")]
console_error_panic_hook::set_once();
}
| tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs",
"repo_id": "tokenizers",
"token_count": 150
} |
use crate::tokenizer::{Decoder, Result};
use monostate::MustBe;
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize, Default)]
/// ByteFallback is a simple trick which converts tokens looking like `<0x61>`
/// to pure bytes, and attempts to make them into a string. If the tokens
/// cannot be decoded you will get � instead for each inconvertible byte token
#[non_exhaustive]
pub struct ByteFallback {
#[serde(rename = "type")]
type_: MustBe!("ByteFallback"),
}
impl ByteFallback {
pub fn new() -> Self {
Self {
type_: MustBe!("ByteFallback"),
}
}
}
impl Decoder for ByteFallback {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let mut new_tokens: Vec<String> = vec![];
let mut previous_byte_tokens: Vec<u8> = vec![];
for token in tokens {
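            // A byte token has the exact form `<0xAB>`: six characters with a two-digit hex payload.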
let bytes = if token.len() == 6 && token.starts_with("<0x") && token.ends_with('>') {
if let Ok(byte) = u8::from_str_radix(&token[3..5], 16) {
Some(byte)
} else {
None
}
} else {
None
};
if let Some(bytes) = bytes {
previous_byte_tokens.push(bytes);
} else {
if !previous_byte_tokens.is_empty() {
if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) {
new_tokens.push(string);
} else {
for _ in 0..previous_byte_tokens.len() {
new_tokens.push("�".into());
}
}
previous_byte_tokens.clear();
}
new_tokens.push(token);
}
}
if !previous_byte_tokens.is_empty() {
if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) {
new_tokens.push(string);
} else {
for _ in 0..previous_byte_tokens.len() {
new_tokens.push("�".into());
}
}
}
Ok(new_tokens)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decode() {
let decoder = ByteFallback::new();
let res = decoder
.decode_chain(vec!["Hey".into(), "friend!".into()])
.unwrap();
assert_eq!(res, vec!["Hey", "friend!"]);
let res = decoder.decode_chain(vec!["<0x61>".into()]).unwrap();
assert_eq!(res, vec!["a"]);
let res = decoder.decode_chain(vec!["<0xE5>".into()]).unwrap();
assert_eq!(res, vec!["�"]);
let res = decoder
.decode_chain(vec!["<0xE5>".into(), "<0x8f>".into()])
.unwrap();
assert_eq!(res, vec!["�", "�"]);
// 叫
let res = decoder
.decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "<0xab>".into()])
.unwrap();
assert_eq!(res, vec!["叫"]);
let res = decoder
.decode_chain(vec![
"<0xE5>".into(),
"<0x8f>".into(),
"<0xab>".into(),
"a".into(),
])
.unwrap();
assert_eq!(res, vec!["叫", "a"]);
let res = decoder
.decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "a".into()])
.unwrap();
assert_eq!(res, vec!["�", "�", "a"]);
}
}
| tokenizers/tokenizers/src/decoders/byte_fallback.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/byte_fallback.rs",
"repo_id": "tokenizers",
"token_count": 1938
} |
use super::{
lattice::Lattice,
trainer::UnigramTrainer,
trie::{Trie, TrieBuilder},
};
use crate::tokenizer::{Model, Result, Token};
use crate::utils::cache::{Cache, MAX_LENGTH};
use std::collections::HashMap;
use std::convert::TryInto;
use std::fs::read_to_string;
use std::path::{Path, PathBuf};
type TokenMap = HashMap<String, u32>;
type Vocab = Vec<(String, f64)>;
/// A `Unigram` model to encode sentences.
pub struct Unigram {
token_to_ids: TokenMap,
pub(crate) vocab: Vocab,
cache: Cache<String, Vec<String>>,
trie: Trie<u8>,
pub min_score: f64,
pub(super) unk_id: Option<usize>,
pub(super) bos_id: usize,
pub(super) eos_id: usize,
fuse_unk: bool,
is_optimized: bool,
byte_fallback: bool,
}
impl PartialEq for Unigram {
fn eq(&self, other: &Self) -> bool {
self.unk_id == other.unk_id && self.vocab == other.vocab
}
}
impl Clone for Unigram {
    // `Clone` can't be derived because it's not implemented for `Cache`.
// To keep things simple when we clone, the new Unigram will start with a fresh cache.
fn clone(&self) -> Self {
let fresh_cache = self.cache.fresh();
Self {
vocab: self.vocab.clone(),
cache: fresh_cache,
token_to_ids: self.token_to_ids.clone(),
trie: self.trie.clone(),
min_score: self.min_score,
unk_id: self.unk_id,
bos_id: self.bos_id,
eos_id: self.eos_id,
fuse_unk: self.fuse_unk,
is_optimized: self.is_optimized,
byte_fallback: self.byte_fallback,
}
}
}
impl std::fmt::Debug for Unigram {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Unigram")
.field("vocab", &self.vocab.len())
.field("unk_id", &self.unk_id)
.field("byte_fallback", &self.byte_fallback)
.finish()
}
}
static K_UNK_PENALTY: f64 = 10.0;
#[derive(thiserror::Error, Debug)]
pub enum UnigramError {
#[error("The vocabulary is empty but at least <unk> is needed")]
EmptyVocabulary,
#[error("The `unk_id` is larger than vocabulary size")]
UnkIdNotInVocabulary,
#[error("Encountered an unknown token but `unk_id` is missing")]
MissingUnkId,
}
impl Default for Unigram {
fn default() -> Self {
let vocab = vec![("<unk>".to_string(), 0.0)];
Self::from(vocab, Some(0), false).unwrap()
}
}
impl Unigram {
/// Create a `Unigram` model from a given vocabulary.
    /// The vocabulary is the list of tokens with their associated scores (a sort of log-probability
    /// of their frequency), which enables tokenization and sampling.
    /// `unk_id` is the index of the unknown token within the vocabulary.
    /// For now `Unigram` *requires* at least `unk` because we might encounter a never-seen char.
/// Further versions might allow that part to be hidden.
pub fn from(
vocab: Vec<(String, f64)>,
unk_id: Option<usize>,
byte_fallback: bool,
) -> Result<Self> {
let n = vocab.len();
let mut token_to_ids: TokenMap = HashMap::new();
let mut builder = TrieBuilder::default();
if let Some(unk_id) = unk_id {
if vocab.is_empty() {
return Err(Box::new(UnigramError::EmptyVocabulary));
}
if unk_id >= vocab.len() {
return Err(Box::new(UnigramError::UnkIdNotInVocabulary));
}
}
let bos_id = n + 1;
let eos_id = n + 2;
let mut min_score = f64::INFINITY;
for (id, (token, score)) in vocab.iter().enumerate() {
token_to_ids.insert(token.to_string(), id as u32);
let bytes: Vec<u8> = token.bytes().collect();
builder.push(&bytes);
if score < &min_score {
min_score = *score;
}
}
let trie = builder.build();
let fuse_unk = true;
let is_optimized = true;
Ok(Self {
vocab,
token_to_ids,
trie,
min_score,
bos_id,
eos_id,
unk_id,
fuse_unk,
cache: Cache::default(),
is_optimized,
byte_fallback,
})
}
#[cfg(test)]
pub(super) fn set_fuse_unk(&mut self, fuse_unk: bool) {
self.fuse_unk = fuse_unk;
self.cache = self.cache.fresh();
}
#[cfg(test)]
pub(super) fn set_optimized(&mut self, is_optimized: bool) {
self.is_optimized = is_optimized;
}
pub fn byte_fallback(&self) -> bool {
self.byte_fallback
}
pub(super) fn len(&self) -> usize {
self.vocab.len()
}
pub(super) fn populate_nodes(&self, lattice: &mut Lattice) {
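        // Unknown characters are scored strictly below every known token: the lowest vocabulary score minus a fixed penalty.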
let unk_score = self.min_score - K_UNK_PENALTY;
let len = lattice.len();
let mut begin_pos = 0;
while begin_pos < len {
let mblen = lattice.sentence[begin_pos..]
.chars()
.next()
.unwrap()
.len_utf8();
let mut has_single_node = false;
for bytes in self
.trie
.common_prefix_search(lattice.sentence.bytes().skip(begin_pos))
{
let n = bytes.len();
let tok = String::from_utf8(bytes).unwrap();
let id = *self.token_to_ids.get(&tok).unwrap();
let item = &self.vocab[id as usize];
assert_eq!(item.0, tok);
let score: f64 = item.1;
lattice.insert(begin_pos, n, score, id.try_into().unwrap());
if !has_single_node && n == mblen {
has_single_node = true;
}
}
if !has_single_node {
if let Some(unk_id) = self.unk_id {
lattice.insert(begin_pos, mblen, unk_score, unk_id);
}
}
begin_pos += mblen
}
}
    /// This function takes a string and encodes it into a Vec of Strings,
    /// using the best tokenization available to the current model.
/// ```
/// use tokenizers::models::unigram::Unigram;
///
/// let pieces = vec![
/// ("<unk>".to_string(), 0.0),
/// ("a".to_string(), 0.0),
/// ("b".to_string(), 0.0),
/// ("c".to_string(), 0.0),
/// ("d".to_string(), 0.0),
/// ("cd".to_string(), 1.0),
/// ("ab".to_string(), 2.0),
/// ("abc".to_string(), 5.0),
/// ("abcd".to_string(), 10.0),
/// ];
/// let model = Unigram::from(pieces, Some(0), false).unwrap();
/// let result = model.encode("abcdacdxx").unwrap();
/// assert_eq!(result, vec!["abcd", "a", "cd", "xx"]);
/// ```
pub fn encode(&self, sentence: &str) -> Result<Vec<String>> {
if sentence.is_empty() {
return Ok(vec![]);
}
if let Some(result) = self.cache.get(sentence) {
Ok(result.to_vec())
} else {
let result = if self.is_optimized {
self.encode_optimized(sentence)?
} else {
self.encode_unoptimized(sentence)?
};
if sentence.len() < MAX_LENGTH {
self.cache.set(sentence.to_owned(), result.clone());
}
Ok(result)
}
}
fn encode_optimized(&self, sentence: &str) -> Result<Vec<String>> {
// https://github.com/google/sentencepiece/blob/d48247191a6d50e469ed1a4a36e877befffd1851/src/unigram_model.cc#L600
#[derive(Debug, Clone)]
struct BestPathNode {
/// The vocab id. (maybe UNK)
id: usize,
/// The total score of the best path ending at this node.
best_path_score: f64,
/// The starting position (in utf-8) of this node. The entire best
/// path can be constructed by backtracking along this link.
starts_at: Option<usize>,
}
impl Default for BestPathNode {
fn default() -> Self {
Self {
id: 0,
best_path_score: 0.0,
starts_at: None,
}
}
}
let size = sentence.len();
let unk_score = self.min_score - K_UNK_PENALTY;
let mut best_path_ends_at = vec![BestPathNode::default(); size + 1];
let mut starts_at = 0;
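        // Forward pass: for each starting byte position, try every vocabulary token found by the trie
        // (or a single-character unknown token) and keep the best-scoring path ending at each position.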
while starts_at < size {
let best_path_score_till_here = best_path_ends_at[starts_at].best_path_score;
let mut has_single_node = false;
let mblen = sentence[starts_at..].chars().next().unwrap().len_utf8();
for tok_bytes in self
.trie
.common_prefix_search(sentence.bytes().skip(starts_at))
{
let key_pos = starts_at + tok_bytes.len();
let token: String = String::from_utf8(tok_bytes).unwrap();
let target_node = &mut best_path_ends_at[key_pos];
let length = key_pos - starts_at;
let id = self.token_to_ids.get(&token).unwrap();
let score = self.vocab.get(*id as usize).unwrap().1;
let candidate_best_path_score = score + best_path_score_till_here;
if target_node.starts_at.is_none()
|| candidate_best_path_score > target_node.best_path_score
{
target_node.best_path_score = candidate_best_path_score;
target_node.starts_at = Some(starts_at);
target_node.id = *id as usize;
}
if !has_single_node && length == mblen {
has_single_node = true;
}
}
if !has_single_node {
let target_node = &mut best_path_ends_at[starts_at + mblen];
let candidate_best_path_score = unk_score + best_path_score_till_here;
if target_node.starts_at.is_none()
|| candidate_best_path_score > target_node.best_path_score
{
target_node.best_path_score = candidate_best_path_score;
target_node.starts_at = Some(starts_at);
target_node.id = self.unk_id.ok_or(UnigramError::MissingUnkId)?;
}
}
starts_at += mblen
}
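        // Backward pass: follow the `starts_at` links from the end of the sentence to recover the best
        // segmentation, fusing consecutive unknown pieces when `fuse_unk` is set.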
let mut ends_at = size;
let mut results: Vec<String> = vec![];
let mut token = vec![];
while ends_at > 0 {
let node = &best_path_ends_at[ends_at];
let starts_at = node.starts_at.unwrap();
if self.fuse_unk
&& self.unk_id.is_some()
&& node.id == self.unk_id.ok_or(UnigramError::MissingUnkId)?
{
token.push(
String::from_utf8(sentence[starts_at..ends_at].as_bytes().to_vec()).unwrap(),
);
} else {
if !token.is_empty() {
token.reverse();
results.push(token.concat());
token = vec![];
}
results.push(
String::from_utf8(sentence[starts_at..ends_at].as_bytes().to_vec()).unwrap(),
);
}
ends_at = starts_at;
}
if !token.is_empty() {
token.reverse();
results.push(token.concat());
}
results.reverse();
Ok(results)
}
fn encode_unoptimized(&self, sentence: &str) -> Result<Vec<String>> {
let mut lattice = Lattice::from(sentence, self.bos_id, self.eos_id);
self.populate_nodes(&mut lattice);
if self.fuse_unk {
let mut results = vec![];
let mut token = String::new();
for node in lattice.viterbi().iter() {
let item = lattice.piece(&node.borrow());
if node.borrow().id == self.unk_id.ok_or(UnigramError::MissingUnkId)? {
token.push_str(&item);
} else {
if !token.is_empty() {
results.push(token);
token = String::new();
}
results.push(item.to_string());
}
}
if !token.is_empty() {
results.push(token);
}
Ok(results)
} else {
Ok(lattice.tokens())
}
}
/// Iterate of vocabulary of the model as a pair of `(token, score)`.
pub fn iter(&self) -> UnigramIterator {
UnigramIterator { model: self, i: 0 }
}
    /// Loads a SentencePiece-style output model after it has been trained by tokenizers.
    /// After that you can use the model with the tokenizers library.
/// ```no_run
/// use tokenizers::models::unigram::Unigram;
/// use std::path::Path;
///
/// let model = Unigram::load("mymodel-unigram.json").unwrap();
/// ```
pub fn load<P: AsRef<Path>>(path: P) -> Result<Unigram> {
let string = read_to_string(path)?;
Ok(serde_json::from_str(&string)?)
}
/// Clears the internal cache
pub fn clear_cache(&mut self) {
self.cache.clear();
}
/// Resize the cache
pub fn resize_cache(&mut self, capacity: usize) {
self.cache.resize(capacity);
}
}
/// Iterator to iterate of vocabulary of the model, and their relative score.
pub struct UnigramIterator<'a> {
model: &'a Unigram,
i: usize,
}
impl<'a> Iterator for UnigramIterator<'a> {
type Item = &'a (String, f64);
fn next(&mut self) -> Option<Self::Item> {
let i = self.i;
if i < self.model.len() {
let r = Some(&self.model.vocab[i]);
self.i += 1;
r
} else {
None
}
}
}
impl Model for Unigram {
type Trainer = UnigramTrainer;
fn get_vocab(&self) -> HashMap<String, u32> {
self.token_to_ids.clone()
}
fn get_vocab_size(&self) -> usize {
self.vocab.len()
}
fn tokenize(&self, sentence: &str) -> Result<Vec<Token>> {
let str_tokens = self.encode(sentence)?;
let mut offset = 0;
let mut tokens = Vec::with_capacity(str_tokens.len());
for string in str_tokens {
let len = string.len();
let offsets = (offset, offset + len);
let id: u32 = match self.token_to_ids.get(&string) {
Some(id) => *id,
None => {
if self.byte_fallback {
let byte_tokens: Option<Vec<_>> = string
.bytes()
.map(|byte| -> Option<Token> {
let byte_string = format!("<0x{byte:02X}>");
let id = self.token_to_ids.get(&byte_string);
id.map(|id| Token::new(*id, byte_string, (offset, offset + len)))
})
.collect();
if let Some(byte_tokens) = byte_tokens {
for token in byte_tokens {
tokens.push(token);
}
offset += len;
continue;
}
}
self.unk_id.ok_or(UnigramError::MissingUnkId)? as u32
}
};
offset += len;
tokens.push(Token::new(id, string, offsets));
}
Ok(tokens)
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.token_to_ids.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab.get(id as usize).map(|item| item.0.clone())
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
let name = match name {
Some(name) => format!("{name}-unigram.json"),
None => "unigram.json".to_string(),
};
let mut fullpath = PathBuf::new();
fullpath.push(folder);
fullpath.push(name);
let string = serde_json::to_string_pretty(self)?;
std::fs::write(&fullpath, string)?;
Ok(vec![fullpath])
}
fn get_trainer(&self) -> Self::Trainer {
UnigramTrainer::default()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_populate_nodes_unk() {
let pieces = vec![("<unk>".to_string(), 0.0)];
let model = Unigram::from(pieces, Some(0), false).unwrap();
let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id);
model.populate_nodes(&mut lattice);
assert_eq!(lattice.begin_nodes[0].len(), 1);
assert_eq!(lattice.begin_nodes[1].len(), 1);
assert_eq!(lattice.begin_nodes[2].len(), 1);
assert_eq!(lattice.begin_nodes[0][0].borrow().id, 0);
assert_eq!(lattice.begin_nodes[1][0].borrow().id, 0);
assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0);
assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2);
assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 3);
assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 4);
}
#[test]
fn test_populate_nodes() {
let pieces = vec![
("<unk>".to_string(), 0.0),
("a".to_string(), 0.1),
("b".to_string(), 0.2),
("ab".to_string(), 0.3),
("bc".to_string(), 0.4),
];
let model = Unigram::from(pieces, Some(0), false).unwrap();
let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id);
model.populate_nodes(&mut lattice);
assert_eq!(lattice.begin_nodes[0].len(), 2); // a, ab
assert_eq!(lattice.begin_nodes[1].len(), 2); // b, bc
assert_eq!(lattice.begin_nodes[2].len(), 1); // c(unk)
// Id is the vocabulary id from Unigram model
// node_id is simply the rank of the given node in the lattice.
assert_eq!(lattice.begin_nodes[0][0].borrow().id, 1);
assert_eq!(lattice.begin_nodes[0][1].borrow().id, 3);
assert_eq!(lattice.begin_nodes[1][0].borrow().id, 2);
assert_eq!(lattice.begin_nodes[1][1].borrow().id, 4);
assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0);
assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2);
assert_eq!(lattice.begin_nodes[0][1].borrow().node_id, 3);
assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 4);
assert_eq!(lattice.begin_nodes[1][1].borrow().node_id, 5);
assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 6);
}
#[test]
fn test_encode() {
let sentencepieces = vec![
("<unk>".to_string(), 0.0),
("a".to_string(), 0.0),
("b".to_string(), 0.0),
("c".to_string(), 0.0),
("d".to_string(), 0.0),
("cd".to_string(), 1.0),
("ab".to_string(), 2.0),
("abc".to_string(), 5.0),
("abcd".to_string(), 10.0),
];
let model = Unigram::from(sentencepieces, Some(0), false).unwrap();
let result = model.encode("abcd").unwrap();
assert_eq!(result, vec!["abcd"]);
}
#[test]
fn test_encode2() {
let sentencepieces = vec![
("<unk>".to_string(), 0.0),
("ab".to_string(), 0.0),
("cd".to_string(), -0.1),
("abc".to_string(), -0.2),
("a".to_string(), -0.3),
("b".to_string(), -0.4),
("c".to_string(), -0.5),
("ABC".to_string(), -0.5),
("abcdabcd".to_string(), 20.0), // User defined just max the scores.
("q".to_string(), 20.5),
("r".to_string(), 20.5),
("qr".to_string(), -0.5),
];
let mut model = Unigram::from(sentencepieces, Some(0), false).unwrap();
for is_optimized in &[true, false] {
model.set_optimized(*is_optimized);
println!("IsOptimized {is_optimized:?}");
assert_eq!(model.encode("abc").unwrap(), vec!["abc"]);
assert_eq!(model.encode("AB").unwrap(), vec!["AB"]);
model.set_fuse_unk(false);
assert_eq!(model.encode("AB").unwrap(), vec!["A", "B"]);
model.set_fuse_unk(true);
assert_eq!(model.encode("AB").unwrap(), vec!["AB"]);
assert_eq!(model.encode("abcd").unwrap(), vec!["ab", "cd"]);
assert_eq!(model.encode("abcc").unwrap(), vec!["abc", "c"]);
assert_eq!(
model.encode("xabcabaabcdd").unwrap(),
vec!["x", "abc", "ab", "a", "ab", "cd", "d"]
);
model.set_fuse_unk(false);
assert_eq!(
model.encode("xyz東京").unwrap(),
vec!["x", "y", "z", "東", "京"]
);
model.set_fuse_unk(true);
assert_eq!(model.encode("xyz東京").unwrap(), vec!["xyz東京"]);
// User encoded in original version
assert_eq!(model.encode("ABC").unwrap(), vec!["ABC"]);
assert_eq!(model.encode("abABCcd").unwrap(), vec!["ab", "ABC", "cd"]);
assert_eq!(
model.encode("ababcdabcdcd").unwrap(),
vec!["ab", "abcdabcd", "cd"]
);
assert_eq!(model.encode("abqrcd").unwrap(), vec!["ab", "q", "r", "cd"]);
}
}
#[test]
fn test_unigram_bytefallback() {
// In [97]: processor.encode_as_pieces("⅐⅛⅑ ")
// Out[97]: ['▁', '<0xE2>', '<0x85>', '<0x90>', '⅛', '<0xE2>', '<0x85>', '<0x91>', '▁']
let sentencepieces = vec![
("<unk>".to_string(), 0.0),
("<0xC3>".to_string(), -0.01),
("<0xA9>".to_string(), -0.03),
];
let unigram = Unigram::from(sentencepieces, Some(0), true).unwrap();
let tokens: Vec<Token> = unigram.tokenize("é").unwrap();
assert_eq!(
tokens,
[
Token {
id: 1,
value: "<0xC3>".to_string(),
offsets: (0, 2)
},
Token {
id: 2,
value: "<0xA9>".to_string(),
offsets: (0, 2)
}
]
);
let tokens = unigram.tokenize("?é").unwrap();
assert_eq!(tokens[0].id, 0);
}
}
| tokenizers/tokenizers/src/models/unigram/model.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/unigram/model.rs",
"repo_id": "tokenizers",
"token_count": 12037
} |
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
use unicode_normalization_alignments::char::is_combining_mark;
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[non_exhaustive]
pub struct Strip {
pub strip_left: bool,
pub strip_right: bool,
}
impl Strip {
pub fn new(strip_left: bool, strip_right: bool) -> Self {
Self {
strip_left,
strip_right,
}
}
}
impl Normalizer for Strip {
/// Strip the normalized string inplace
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
if self.strip_left && self.strip_right {
// Fast path
normalized.strip();
} else {
if self.strip_left {
normalized.lstrip();
}
if self.strip_right {
normalized.rstrip();
}
}
Ok(())
}
}
// This normalizer removes combining marks from a normalized string.
// It's different from unidecode as it does not attempt to modify
// non-ASCII languages.
#[derive(Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct StripAccents;
impl Normalizer for StripAccents {
/// Strip the normalized string inplace
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
normalized.filter(|c| !is_combining_mark(c));
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::normalizer::NormalizedString;
use crate::normalizers::Lowercase;
use crate::normalizers::NFKD;
use unicode_normalization_alignments::UnicodeNormalization;
#[test]
fn test_strip_accents() {
// Unicode combining char
let original: String = "Me llamó".nfkd().map(|(c, _)| c).collect();
let normalized = "Me llamo";
assert_ne!(original, normalized);
let mut n = NormalizedString::from(original);
StripAccents.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
// Ignores regular ascii
let original = "Me llamo";
let normalized = "Me llamo";
assert_eq!(original, normalized);
let mut n = NormalizedString::from(original);
StripAccents.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
// Does not change chinese
let original: String = "这很简单".nfkd().map(|(c, _)| c).collect();
let normalized = "这很简单";
assert_eq!(original, normalized);
let mut n = NormalizedString::from(original);
StripAccents.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
}
#[test]
fn test_vietnamese_bug() {
let original: String = "ậ…".to_string();
let normalized = "a...".to_string();
assert_ne!(original, normalized);
let mut n = NormalizedString::from(original);
NFKD.normalize(&mut n).unwrap();
StripAccents.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
Lowercase.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
let original: String = "Cụ thể, bạn sẽ tham gia một nhóm các giám đốc điều hành tổ chức, các nhà lãnh đạo doanh nghiệp, các học giả, chuyên gia phát triển và tình nguyện viên riêng biệt trong lĩnh vực phi lợi nhuận…".to_string();
let normalized = "cu the, ban se tham gia mot nhom cac giam đoc đieu hanh to chuc, cac nha lanh đao doanh nghiep, cac hoc gia, chuyen gia phat trien va tinh nguyen vien rieng biet trong linh vuc phi loi nhuan...".to_string();
let mut n = NormalizedString::from(original);
NFKD.normalize(&mut n).unwrap();
StripAccents.normalize(&mut n).unwrap();
Lowercase.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
}
#[test]
fn test_thai_bug() {
let original = "ำน\u{e49}ำ3ลำ".to_string();
let normalized = "านา3ลา".to_string();
assert_ne!(original, normalized);
let mut n = NormalizedString::from(original);
NFKD.normalize(&mut n).unwrap();
StripAccents.normalize(&mut n).unwrap();
Lowercase.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
}
#[test]
fn test_strip_accents_multiple() {
let original = "e\u{304}\u{304}\u{304}o";
let normalized = "eo";
assert_ne!(original, normalized);
let mut n = NormalizedString::from(original);
StripAccents.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
assert_eq!(
n,
NormalizedString::new(
original.to_string(),
normalized.to_string(),
vec![(0, 1), (7, 8)],
0
)
);
assert_eq!(
n.alignments_original(),
vec![
(0, 1),
(1, 1),
(1, 1),
(1, 1),
(1, 1),
(1, 1),
(1, 1),
(1, 2)
]
);
}
}
| tokenizers/tokenizers/src/normalizers/strip.rs/0 | {
"file_path": "tokenizers/tokenizers/src/normalizers/strip.rs",
"repo_id": "tokenizers",
"token_count": 2512
} |
use crate::tokenizer::{Encoding, PostProcessor, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::iter::FromIterator;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(tag = "type")]
pub struct BertProcessing {
pub sep: (String, u32),
pub cls: (String, u32),
}
impl Default for BertProcessing {
fn default() -> Self {
Self {
sep: ("[SEP]".into(), 102),
cls: ("[CLS]".into(), 101),
}
}
}
impl BertProcessing {
pub fn new(sep: (String, u32), cls: (String, u32)) -> Self {
Self { sep, cls }
}
pub fn get_sep_copy(&self) -> (String, u32) {
(self.sep.0.clone(), self.sep.1)
}
pub fn get_cls_copy(&self) -> (String, u32) {
(self.cls.0.clone(), self.cls.1)
}
}
#[derive(thiserror::Error, Debug)]
pub enum BertProcessorError {
#[error("encodings vector length must be either 1 or 2")]
InvalidEncodingsVecLength,
}
impl PostProcessor for BertProcessing {
fn added_tokens(&self, is_pair: bool) -> usize {
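        // `[CLS] A [SEP]` adds 2 tokens for a single sequence; `[CLS] A [SEP] B [SEP]` adds 3 for a pair.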
if is_pair {
3
} else {
2
}
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
if !add_special_tokens {
return Ok(encodings);
}
let encodings: Vec<Encoding> = encodings
.iter_mut()
.enumerate()
.map(|(i, encoding)| {
if i == 0 {
let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat();
let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat();
let tokens = [
&[self.cls.0.clone()],
encoding.get_tokens(),
&[self.sep.0.clone()],
]
.concat();
let words = [&[None], encoding.get_word_ids(), &[None]].concat();
let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
let special_tokens =
[&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]].concat();
let attention_mask = vec![1; ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain
// the special tokens.
let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]);
Encoding::new(
ids,
type_ids,
tokens,
words,
offsets,
special_tokens,
attention_mask,
encoding
.take_overflowing()
.into_iter()
.map(|encoding| {
let ids =
[&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat();
let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat();
let tokens = [
&[self.cls.0.clone()],
encoding.get_tokens(),
&[self.sep.0.clone()],
]
.concat();
let words = [&[None], encoding.get_word_ids(), &[None]].concat();
let offsets =
[&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
let special_tokens =
[&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]]
.concat();
let attention_mask = vec![1; ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't
// contain the special tokens.
let sequence_ranges =
HashMap::from_iter(vec![(0, 1..ids.len() - 1)]);
Encoding::new(
ids,
type_ids,
tokens,
words,
offsets,
special_tokens,
attention_mask,
vec![],
sequence_ranges,
)
})
.collect(),
sequence_ranges,
)
} else {
let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat();
let pair_type_ids = [encoding.get_type_ids(), &[1]].concat();
let pair_tokens = [encoding.get_tokens(), &[self.sep.0.clone()]].concat();
let pair_words = [encoding.get_word_ids(), &[None]].concat();
let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat();
let pair_special_tokens =
[&vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat();
let pair_attention_mask = vec![1; pair_ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain
// the special tokens.
let pair_sequence_ranges = HashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]);
Encoding::new(
pair_ids,
pair_type_ids,
pair_tokens,
pair_words,
pair_offsets,
pair_special_tokens,
pair_attention_mask,
encoding
.take_overflowing()
.into_iter()
.map(|encoding| {
let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat();
let pair_type_ids = [encoding.get_type_ids(), &[1]].concat();
let pair_tokens =
[encoding.get_tokens(), &[self.sep.0.clone()]].concat();
let pair_words = [encoding.get_word_ids(), &[None]].concat();
let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat();
let pair_special_tokens =
[&vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat();
let pair_attention_mask = vec![1; pair_ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges
// shouldn't contain the special tokens.
let pair_sequence_ranges =
HashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]);
Encoding::new(
pair_ids,
pair_type_ids,
pair_tokens,
pair_words,
pair_offsets,
pair_special_tokens,
pair_attention_mask,
vec![],
pair_sequence_ranges,
)
})
.collect(),
pair_sequence_ranges,
)
}
})
.collect();
Ok(encodings)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serde() {
let bert = BertProcessing::default();
let bert_r = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#;
assert_eq!(serde_json::to_string(&bert).unwrap(), bert_r);
assert_eq!(
serde_json::from_str::<BertProcessing>(bert_r).unwrap(),
bert
);
}
#[test]
fn bert_processing() {
let processor = BertProcessing::default();
assert_eq!(processor.added_tokens(false), 2);
assert_eq!(processor.added_tokens(true), 3);
use crate::Token;
let encoding = Encoding::from_tokens(
vec![
Token::new(12, "Hello".into(), (0, 5)),
Token::new(14, "there".into(), (6, 11)),
],
0,
);
let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0);
let single_encoding = processor.process(encoding.clone(), None, true).unwrap();
assert_eq!(
single_encoding,
Encoding::new(
vec![101, 12, 14, 102],
vec![0, 0, 0, 0],
vec![
"[CLS]".into(),
"Hello".into(),
"there".into(),
"[SEP]".into()
],
vec![None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0)],
vec![1, 0, 0, 1],
vec![1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..3)]),
)
);
assert_eq!(single_encoding.token_to_sequence(2), Some(0));
assert_eq!(single_encoding.token_to_sequence(3), None);
let pair_encoding = processor
.process(encoding.clone(), Some(pair.clone()), true)
.unwrap();
assert_eq!(
pair_encoding,
Encoding::new(
vec![101, 12, 14, 102, 15, 102],
vec![0, 0, 0, 0, 1, 1],
vec![
"[CLS]".into(),
"Hello".into(),
"there".into(),
"[SEP]".into(),
"pair".into(),
"[SEP]".into()
],
vec![None, None, None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (0, 0)],
vec![1, 0, 0, 1, 0, 1],
vec![1, 1, 1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..3), (1, 4..5)]),
)
);
assert_eq!(pair_encoding.token_to_sequence(2), Some(0));
assert_eq!(pair_encoding.token_to_sequence(3), None);
assert_eq!(pair_encoding.token_to_sequence(4), Some(1));
assert_eq!(pair_encoding.token_to_sequence(5), None);
// No special tokens
let pair_encoding = processor.process(encoding, Some(pair), false).unwrap();
assert_eq!(
pair_encoding,
Encoding::new(
vec![12, 14, 15],
vec![0, 0, 1],
vec!["Hello".into(), "there".into(), "pair".into(),],
vec![None, None, None],
vec![(0, 5), (6, 11), (0, 4)],
vec![0, 0, 0],
vec![1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 0..2), (1, 2..3)]),
)
);
assert_eq!(pair_encoding.token_to_sequence(0), Some(0));
assert_eq!(pair_encoding.token_to_sequence(1), Some(0));
assert_eq!(pair_encoding.token_to_sequence(2), Some(1));
}
}
| tokenizers/tokenizers/src/processors/bert.rs/0 | {
"file_path": "tokenizers/tokenizers/src/processors/bert.rs",
"repo_id": "tokenizers",
"token_count": 7483
} |
pub(crate) mod cache;
#[cfg(feature = "http")]
pub(crate) mod from_pretrained;
#[cfg(feature = "unstable_wasm")]
mod fancy;
#[cfg(feature = "unstable_wasm")]
pub use fancy::SysRegex;
#[cfg(not(feature = "unstable_wasm"))]
mod onig;
#[cfg(not(feature = "unstable_wasm"))]
pub use crate::utils::onig::SysRegex;
pub mod iter;
pub mod padding;
pub mod parallelism;
pub(crate) mod progress;
pub mod truncation;
use serde::{Serialize, Serializer};
use std::collections::{BTreeMap, HashMap};
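// Serialize a `HashMap` with its keys in sorted order (via a `BTreeMap`) so the output is deterministic.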
pub(crate) fn ordered_map<S, K, V>(
value: &HashMap<K, V>,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
K: Serialize + std::cmp::Ord,
V: Serialize,
{
let ordered: BTreeMap<_, _> = value.iter().collect();
ordered.serialize(serializer)
}
macro_rules! impl_enum_from (
($from_ty:ty, $enum:ty, $variant:ident) => {
impl From<$from_ty> for $enum {
fn from(from: $from_ty) -> Self {
<$enum>::$variant(from)
}
}
}
);
/// Implement `serde::{Serialize, Deserialize}` with the `#[serde(tag = "type")]` attribute for a given struct.
/// Panics when a JSON string being deserialized is missing the field `type`.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// #[derive(Debug)]
/// struct Point {
/// x: i32,
/// #[serde(default = "default_y")]
/// y: i32,
/// }
/// }
/// fn default_y() -> i32 {
/// 5
/// }
///
/// let point = Point { x: 1, y: 2 };
/// let serialized_s = r#"{"type":"Point","x":1,"y":2}"#;
/// assert_eq!(serde_json::to_string(&point).unwrap(), serialized_s);
/// }
/// ```
///
/// ```should_panic
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// #[derive(Debug)]
/// struct Point1D {
/// x: i32,
/// }
/// }
///
/// let serialized_s = r#"{"x":1}"#;
/// let deserialized: Point1D = serde_json::from_str(serialized_s).unwrap();
/// }
/// ```
///
/// # Examples (unit structs)
///
/// ```
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// struct Unit;
/// }
///
/// let unit = Unit;
/// let serialized_s = r#"{"type":"Unit"}"#;
/// assert_eq!(serde_json::to_string(&unit).unwrap(), serialized_s);
/// }
/// ```
///
/// ```should_panic
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// struct Unit;
/// }
///
/// let serialized_s = r#"{"some_field":1}"#;
/// let deserialized: Unit = serde_json::from_str(serialized_s).unwrap();
/// }
/// ```
#[macro_export]
macro_rules! impl_serde_type{
(
$(#[$meta:meta])*
$vis:vis struct $struct_name:ident {
$(
$(#[$field_meta:meta])*
$field_vis:vis $field_name:ident : $field_type:ty
),*$(,)+
}
) => {
paste::paste!{
$(#[$meta])*
#[derive(Serialize, Deserialize)]
#[serde(tag = "type", from = $struct_name "Deserializer")]
$vis struct $struct_name{
$(
$(#[$field_meta])*
$field_vis $field_name : $field_type,
)*
}
#[doc(hidden)]
$(#[$meta])*
#[derive(Deserialize)]
#[serde(tag = "type", remote = $struct_name "")]
struct [<$struct_name Def>]{
$(
$(#[$field_meta])*
$field_vis $field_name : $field_type,
)*
}
#[doc(hidden)]
#[derive(Deserialize)]
enum [<$struct_name Type>] {
$struct_name,
}
#[doc(hidden)]
#[derive(Deserialize)]
struct [<$struct_name Deserializer>] {
#[allow(dead_code)]
r#type: [<$struct_name Type>],
#[serde(flatten, with = $struct_name "Def")]
r#struct: $struct_name,
}
#[doc(hidden)]
impl std::convert::From<[<$struct_name Deserializer>]> for $struct_name {
fn from(v: [<$struct_name Deserializer>]) -> Self {
v.r#struct
}
}
}
};
(
$(#[$meta:meta])*
$vis:vis struct $struct_name:ident;
) => {
paste::paste!{
$(#[$meta])*
$vis struct $struct_name;
impl serde::Serialize for $struct_name {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where
S: serde::ser::Serializer {
let helper = [<$struct_name Helper>]{r#type: [<$struct_name Type>]::$struct_name};
helper.serialize(serializer)
}
}
impl<'de> serde::Deserialize<'de> for $struct_name {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let _helper = [<$struct_name Helper>]::deserialize(deserializer)?;
Ok($struct_name)
}
}
#[derive(serde::Serialize, serde::Deserialize)]
enum [<$struct_name Type>] {
$struct_name,
}
#[derive(serde::Serialize, serde::Deserialize)]
struct [<$struct_name Helper>] {
#[allow(dead_code)]
r#type: [<$struct_name Type>],
}
}
}
}
// Re-export macro_rules_attribute
pub use macro_rules_attribute::macro_rules_attribute;
| tokenizers/tokenizers/src/utils/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/mod.rs",
"repo_id": "tokenizers",
"token_count": 3092
} |
// Based on [this tutorial](https://github.com/jsdoc2md/jsdoc-to-markdown/wiki/How-to-create-one-output-file-per-class).
import fs from 'fs';
import path from 'path';
import url from 'url';
import jsdoc2md from 'jsdoc-to-markdown';
const docs = path.dirname(path.dirname(url.fileURLToPath(import.meta.url)));
const root = path.dirname(docs);
// jsdoc config file
const conf = path.join(docs, 'jsdoc-conf.json');
// input and output paths
const inputFile = path.join(root, '/src/**/*.js');
const outputDir = path.join(root, '/docs/source/api/');
// get template data
const templateData = await jsdoc2md.getTemplateData({
files: inputFile,
configure: conf
})
// reduce templateData to an array of module names
const moduleNames = templateData.reduce(
(moduleNames, identifier) => {
if (identifier.kind === 'module') {
moduleNames.push(identifier.name)
}
return moduleNames
}, []
)
// create a documentation file for each module
for (const moduleName of moduleNames) {
const template = `{{#module name="${moduleName}"}}{{>docs}}{{/module}}`;
console.log(`rendering ${moduleName}, template: ${template}`);
let output = await jsdoc2md.render({
'data': templateData,
'template': template,
'heading-depth': 1,
'no-gfm': true,
'name-format': 'backticks',
'no-cache': true,
'separators': true,
'configure': conf,
});
// Post-processing
output = output.replace(/(^#+\s.+)/gm, '$1\n'); // Add new line after each header
// Replace all generated marker names with ids (for linking), and add group class
output = output.replace(/<a name="(\S+)"><\/a>/g, '<a id="$1" class="group"></a>');
// Unescape some of the characters which jsdoc2md escapes:
// TODO: May need to extend this list
output = output.replace(/\\([|_&*])/gm, '$1');
output = output.replaceAll('new exports.', 'new ');
let outputPath = path.resolve(outputDir, `${moduleName}.md`);
fs.mkdirSync(path.dirname(outputPath), { recursive: true });
fs.writeFileSync(outputPath, output);
}
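// Illustrative invocation (assumes a Node.js version with ES module support and that the
// docs dependencies, e.g. `jsdoc-to-markdown`, are installed):
//
//   node docs/scripts/generate.js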
| transformers.js/docs/scripts/generate.js/0 | {
"file_path": "transformers.js/docs/scripts/generate.js",
"repo_id": "transformers.js",
"token_count": 790
} |
# Transformers.js - Sample Electron application
An example project to show how to run 🤗 Transformers in an [Electron](https://www.electronjs.org/) application.
## Getting Started
1. Clone the repo and enter the project directory:
```bash
git clone https://github.com/huggingface/transformers.js.git
cd transformers.js/examples/electron/
```
1. Install the necessary dependencies:
```bash
npm install
```
1. Run the application:
```bash
npm run start
```
After a few seconds, a new window should pop up on your screen!
## Editing the template
All source code can be found in `./src/`:
- `index.js` - Serves as the entry point for the application's main process. When an Electron app is launched, this is the first file that gets executed, and it is responsible for setting up the main process of the application. You will need to restart the application after editing this file for your changes to take effect.
- `preload.js` - Used to preload scripts and modules in a renderer process before any other scripts run. In our case, we use the `contextBridge` API to expose the `run` function to the renderer, which runs the model in the background. You will need to restart the application after editing this file for your changes to take effect. A minimal sketch of this `contextBridge` pattern is shown after this list.
- `model.js` - Contains all the logic for loading the model and running predictions. You will need to restart the application after editing this file for your changes to take effect.
- `client.js` - Handles interaction with the interface, as well as communication between the renderer thread (UI) and the worker thread (processing). To see changes made to this file while editing, simply refresh the window (<kbd>Ctrl + R</kbd> or "View" → "Reload").
- `index.html`, `index.css` - The user interface which is displayed to the user. To see changes made to these files while editing, simply refresh the window (<kbd>Ctrl + R</kbd> or "View" → "Reload").
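For reference, the `contextBridge` pattern used in `preload.js` generally looks something like the sketch below. It is only illustrative: the `electronAPI` key and the `'run'` IPC channel name are assumptions, not necessarily the exact names used in this template.
```js
// preload.js (illustrative sketch)
const { contextBridge, ipcRenderer } = require('electron');

// Expose a `run` function to the renderer. The model itself is loaded and run
// in the main process, which we reach over IPC.
contextBridge.exposeInMainWorld('electronAPI', {
    run: (text) => ipcRenderer.invoke('run', text),
});
```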
| transformers.js/examples/electron/README.md/0 | {
"file_path": "transformers.js/examples/electron/README.md",
"repo_id": "transformers.js",
"token_count": 528
} |
// background.js - Handles requests from the UI, runs the model, then sends back a response
import { pipeline, env } from '@xenova/transformers';
// Skip initial check for local models, since we are not loading any local models.
env.allowLocalModels = false;
// Due to a bug in onnxruntime-web, we must disable multithreading for now.
// See https://github.com/microsoft/onnxruntime/issues/14445 for more information.
env.backends.onnx.wasm.numThreads = 1;
class PipelineSingleton {
static task = 'text-classification';
static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
static instance = null;
static async getInstance(progress_callback = null) {
if (this.instance === null) {
this.instance = pipeline(this.task, this.model, { progress_callback });
}
return this.instance;
}
}
// Create generic classify function, which will be reused for the different types of events.
const classify = async (text) => {
// Get the pipeline instance. This will load and build the model when run for the first time.
let model = await PipelineSingleton.getInstance((data) => {
// You can track the progress of the pipeline creation here.
// e.g., you can send `data` back to the UI to indicate a progress bar
// console.log('progress', data)
});
// Actually run the model on the input text
let result = await model(text);
return result;
};
////////////////////// 1. Context Menus //////////////////////
//
// Add a listener to create the initial context menu items,
// context menu items only need to be created at runtime.onInstalled
chrome.runtime.onInstalled.addListener(function () {
// Register a context menu item that will only show up for selection text.
chrome.contextMenus.create({
id: 'classify-selection',
title: 'Classify "%s"',
contexts: ['selection'],
});
});
// Perform inference when the user clicks a context menu
chrome.contextMenus.onClicked.addListener(async (info, tab) => {
// Ignore context menu clicks that are not for classifications (or when there is no input)
if (info.menuItemId !== 'classify-selection' || !info.selectionText) return;
// Perform classification on the selected text
let result = await classify(info.selectionText);
// Do something with the result
chrome.scripting.executeScript({
target: { tabId: tab.id }, // Run in the tab that the user clicked in
args: [result], // The arguments to pass to the function
function: (result) => { // The function to run
// NOTE: This function is run in the context of the web page, meaning that `document` is available.
console.log('result', result)
console.log('document', document)
},
});
});
//////////////////////////////////////////////////////////////
////////////////////// 2. Message Events /////////////////////
//
// Listen for messages from the UI, process it, and send the result back.
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
console.log('sender', sender)
if (message.action !== 'classify') return; // Ignore messages that are not meant for classification.
// Run model prediction asynchronously
(async function () {
// Perform classification
let result = await classify(message.text);
// Send response back to UI
sendResponse(result);
})();
// return true to indicate we will send a response asynchronously
// see https://stackoverflow.com/a/46628145 for more information
return true;
});
//////////////////////////////////////////////////////////////
| transformers.js/examples/extension/src/background.js/0 | {
"file_path": "transformers.js/examples/extension/src/background.js",
"repo_id": "transformers.js",
"token_count": 1164
} |
/** @type {import('tailwindcss').Config} */
module.exports = {
content: [
'./src/pages/**/*.{js,ts,jsx,tsx,mdx}',
'./src/components/**/*.{js,ts,jsx,tsx,mdx}',
'./src/app/**/*.{js,ts,jsx,tsx,mdx}',
],
theme: {
extend: {
backgroundImage: {
'gradient-radial': 'radial-gradient(var(--tw-gradient-stops))',
'gradient-conic':
'conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))',
},
},
},
plugins: [],
}
| transformers.js/examples/next-client/tailwind.config.js/0 | {
"file_path": "transformers.js/examples/next-client/tailwind.config.js",
"repo_id": "transformers.js",
"token_count": 236
} |
{
"name": "segment-anything-client",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"@huggingface/transformers": "^3.0.0-alpha.0"
},
"devDependencies": {
"vite": "^5.2.9"
}
}
| transformers.js/examples/segment-anything-client/package.json/0 | {
"file_path": "transformers.js/examples/segment-anything-client/package.json",
"repo_id": "transformers.js",
"token_count": 152
} |
export const SPEAKERS = {
"US female 1": "cmu_us_slt_arctic-wav-arctic_a0001",
"US female 2": "cmu_us_clb_arctic-wav-arctic_a0001",
"US male 1": "cmu_us_bdl_arctic-wav-arctic_a0003",
"US male 2": "cmu_us_rms_arctic-wav-arctic_a0003",
"Canadian male": "cmu_us_jmk_arctic-wav-arctic_a0002",
"Scottish male": "cmu_us_awb_arctic-wav-arctic_b0002",
"Indian male": "cmu_us_ksp_arctic-wav-arctic_a0007",
}
export const DEFAULT_SPEAKER = "cmu_us_slt_arctic-wav-arctic_a0001";
| transformers.js/examples/text-to-speech-client/src/constants.js/0 | {
"file_path": "transformers.js/examples/text-to-speech-client/src/constants.js",
"repo_id": "transformers.js",
"token_count": 247
} |
import { Fragment } from 'react';
const COLOURS = [
'bg-purple-300',
'bg-green-300',
'bg-yellow-300',
'bg-red-300',
'bg-blue-300',
]
export function Token({ text, position, margin }) {
const textWithLineBreaks = text.split('\n').map((line, index, array) => (
<Fragment key={index}>
{line}
{index !== array.length - 1 && <br />}
</Fragment>
));
return (<span
style={{ marginLeft: margin }}
className={`leading-5 ${textWithLineBreaks.length === 1 ? 'inline-block ' : ''}${COLOURS[position % COLOURS.length]}`}>
{textWithLineBreaks}
</span>)
}
 | transformers.js/examples/tokenizer-playground/src/components/Token.jsx/0 | {
"file_path": "transformers.js/examples/tokenizer-playground/src/components/Token.jsx",
"repo_id": "transformers.js",
"token_count": 287
} |
from enum import Enum
from tqdm import tqdm
from typing import Set, List, Optional
import onnx
import os
from dataclasses import dataclass, field
from transformers import HfArgumentParser
from optimum.onnx.graph_transformations import check_and_save_model
from onnxruntime.quantization import QuantType, QuantizationMode
from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer
from onnxruntime.quantization.registry import IntegerOpsRegistry
from onnxruntime.quantization.matmul_4bits_quantizer import MatMul4BitsQuantizer
from onnxruntime.quantization.matmul_bnb4_quantizer import MatMulBnb4Quantizer
from onnxconverter_common import float16
import onnx_graphsurgeon as gs
class QuantMode(Enum):
# F32 = 'fp32'
FP16 = "fp16"
Q8 = "q8"
QI8 = "int8"
QU8 = "uint8"
Q4 = "q4"
Q4F16 = "q4f16"
BNB4 = "bnb4"
QUANTIZE_SUFFIX_MAPPING = {
QuantMode.Q8: "quantized",
}
QUANTIZE_OPTIONS = tuple(x.value for x in QuantMode)
# A list of operators that, when detected in a model, should select QUInt8 as the weight type for 8-bit quantization.
QUINT8_OPS = (
# NOTE:
# As of 2024/11/29, the latest version of onnxruntime-web is 1.20.1, and does not support INT8 weights for Conv layers.
# If you attempt to run a model with INT8 weights for Conv layers, you will get an error like:
# `Can't create a session. ERROR_CODE: 9, ERROR_MESSAGE: Could not find an implementation for ConvInteger(10) node with name '/.../Conv_quant'`
#
# For this reason, we choose model weight types to ensure compatibility with onnxruntime-web.
#
# As per docs, signed weight type (QInt8) is faster on most CPUs, so, we use that unless the model contains a Conv layer.
# For more information, see:
# - https://github.com/microsoft/onnxruntime/issues/3130#issuecomment-1105200621
# - https://github.com/microsoft/onnxruntime/issues/2339
"Conv",
# Models produced by onnxruntime-genai contain optimized operators that perform better with QUInt8 weights.
"GroupQueryAttention",
"MultiHeadAttention",
# TODO: "SimplifiedLayerNormalization", "SkipSimplifiedLayerNormalization"
)
@dataclass
class IOArguments:
"""
Arguments to specify input and output folders
"""
input_folder: str = field(
metadata={
"help": "Path of the input folder containing the .onnx models to quantize"
}
)
output_folder: str = field(
metadata={
"help": "Path of the output folder where the quantized .onnx models will be saved"
}
)
@dataclass
class QuantizationArguments:
"""
Arguments for quantizing ONNX models
"""
modes: QuantMode = field(
default=QUANTIZE_OPTIONS,
metadata={
"help": "Quantization mode to use.",
"choices": QUANTIZE_OPTIONS,
"nargs": "+",
},
)
# 8-bit quantization
per_channel: bool = field(
default=None, metadata={"help": "Whether to quantize weights per channel"}
)
reduce_range: bool = field(
default=None,
metadata={
"help": "Whether to quantize weights with 7-bits. It may improve the accuracy for some models running on non-VNNI machine, especially for per-channel mode"
},
)
# 4-bit quantization
block_size: int = field(
default=None,
metadata={
"help": "Block size for blockwise quantization. Note: bnb.nn.Linear4bit only uses block_size=64"
},
)
# MatMul4BitsQuantizer
is_symmetric: bool = field(
default=True,
metadata={"help": "Indicate whether to quantize the model symmetrically"},
)
accuracy_level: int = field(
default=None,
metadata={
"help": "Accuracy level of the 4-bit quantized MatMul computation. "
"Refer to the MatMulNBits contrib op's 'accuracy_level' attribute for details "
"(https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#commicrosoftmatmulnbits)."
},
)
# MatMulBnb4Quantizer
quant_type: int = field(
default=MatMulBnb4Quantizer.NF4,
metadata={
"help": "Quantization data type. 0: FP4, 1: NF4",
"choices": [MatMulBnb4Quantizer.FP4, MatMulBnb4Quantizer.NF4],
},
)
op_block_list: List[str] = field(
default=None,
metadata={
"help": "List of operators to exclude from quantization."
"Can be any standard ONNX operator (see https://onnx.ai/onnx/operators/)"
"or your custom implemented operators.",
"nargs": "+",
},
)
def get_operators(model: onnx.ModelProto) -> Set[str]:
operators = set()
def traverse_graph(graph):
for node in graph.node:
operators.add(node.op_type)
for attr in node.attribute:
if attr.type == onnx.AttributeProto.GRAPH:
traverse_graph(attr.g)
traverse_graph(model.graph)
return operators
def quantize_q8(
model: onnx.ModelProto,
save_path: str,
per_channel: bool,
reduce_range: bool,
weight_type: QuantType,
op_block_list: Optional[List[str]]
):
"""
Quantize the weights of the model from float32 to int8/uint8
Uses unsigned ints for activation values, signed ints for weights, per
https://onnxruntime.ai/docs/performance/quantization.html#data-type-selection
it is faster on most CPU architectures
"""
op_types_to_quantize = set(IntegerOpsRegistry.keys())
if op_block_list is not None:
op_types_to_quantize.difference_update(op_block_list)
quantizer = ONNXQuantizer(
model,
per_channel,
reduce_range,
mode=QuantizationMode.IntegerOps,
static=False,
weight_qType=weight_type,
activation_qType=QuantType.QUInt8, # dynamic activation only supports uint8
tensors_range=None,
nodes_to_quantize=[],
nodes_to_exclude=[],
op_types_to_quantize=op_types_to_quantize,
extra_options=dict(
EnableSubgraph=True,
MatMulConstBOnly=True,
),
)
quantizer.quantize_model()
check_and_save_model(quantizer.model.model, save_path)
def quantize_fp16(
model: onnx.ModelProto,
save_path: str,
op_block_list: Optional[List[str]]
):
"""
Quantize the weights of the model from float32 to float16
"""
# Check whether we should disable shape infer:
# ValueError: Message onnx.ModelProto exceeds maximum protobuf size of 2GB: 2338583841
disable_shape_infer = model.ByteSize() >= onnx.checker.MAXIMUM_PROTOBUF
blocked_ops = set(float16.DEFAULT_OP_BLOCK_LIST)
if op_block_list is not None:
blocked_ops.update(op_block_list)
model_fp16 = float16.convert_float_to_float16(
model,
keep_io_types=True,
disable_shape_infer=disable_shape_infer,
op_block_list=blocked_ops,
)
graph = gs.import_onnx(model_fp16)
graph.toposort()
model_fp16 = gs.export_onnx(graph)
check_and_save_model(model_fp16, save_path)
def quantize_q4(
model: onnx.ModelProto,
save_path: str | None,
block_size: int,
is_symmetric: bool,
accuracy_level: int,
):
"""
Quantize the weights of the model from float32 to 4-bit int
"""
quantizer = MatMul4BitsQuantizer(
model=model,
block_size=block_size,
is_symmetric=is_symmetric,
accuracy_level=accuracy_level,
)
quantizer.process()
if save_path:
check_and_save_model(quantizer.model.model, save_path)
return quantizer.model.model
def quantize_bnb4(
model: onnx.ModelProto,
save_path: str,
block_size: int,
quant_type: int,
):
"""
Quantize the weights of the model from float32 to 4-bit int using MatMulBnb4Quantizer
"""
quantizer = MatMulBnb4Quantizer(
model=model,
block_size=block_size,
quant_type=quant_type,
)
quantizer.process()
check_and_save_model(quantizer.model.model, save_path)
return quantizer.model.model
def quantize(input_folder, output_folder, quantization_args: QuantizationArguments):
# (Step 1) Validate the arguments
if not quantization_args.modes:
raise ValueError("At least one quantization mode must be specified")
if not os.path.exists(input_folder):
raise ValueError(f"Input folder {input_folder} does not exist")
model_names_or_paths = [
os.path.join(input_folder, file)
for file in os.listdir(input_folder)
if file.endswith(".onnx")
]
if not model_names_or_paths:
raise ValueError(f"No .onnx models found in {input_folder}")
os.makedirs(output_folder, exist_ok=True)
# (Step 2) Quantize the models
for model_path in (progress_models := tqdm(model_names_or_paths)):
progress_models.set_description(f"Processing {model_path}")
file_name_without_extension = os.path.splitext(os.path.basename(model_path))[0]
for mode in (progress := tqdm(quantization_args.modes)):
progress.set_description(f" - Quantizing to {mode}")
mode = QuantMode(mode)
suffix = QUANTIZE_SUFFIX_MAPPING.get(mode, mode.value)
save_path = os.path.join(
output_folder,
f"{file_name_without_extension}_{suffix}.onnx",
)
# NOTE: Unfortunately, we need to reload the model for each quantization mode,
# which is memory inefficient. This is because the quantization functions
# modify the model in-place, and we need to keep the original model for each mode.
model = onnx.load_model(model_path)
if mode == QuantMode.FP16:
quantize_fp16(
model,
save_path,
quantization_args.op_block_list
)
elif mode in (QuantMode.Q4, QuantMode.Q4F16):
block_size = quantization_args.block_size or 32
q4_model = quantize_q4(
model,
save_path=None if mode == QuantMode.Q4F16 else save_path,
block_size=block_size,
is_symmetric=quantization_args.is_symmetric,
accuracy_level=quantization_args.accuracy_level,
)
if mode == QuantMode.Q4F16:
quantize_fp16(
q4_model,
save_path,
quantization_args.op_block_list,
)
elif mode == QuantMode.BNB4:
quantize_bnb4(
model,
save_path,
block_size=quantization_args.block_size or 64,
quant_type=(
quantization_args.quant_type
if quantization_args.quant_type is not None
else MatMulBnb4Quantizer.NF4
),
)
elif mode in (QuantMode.Q8, QuantMode.QI8, QuantMode.QU8):
if mode == QuantMode.Q8:
op_types = get_operators(model)
weight_type = (
QuantType.QUInt8
if any(x in QUINT8_OPS for x in op_types)
else QuantType.QInt8
)
elif mode == QuantMode.QI8:
weight_type = QuantType.QInt8
else: # mode == QuantMode.QU8:
weight_type = QuantType.QUInt8
quantize_q8(
model,
save_path,
per_channel=quantization_args.per_channel,
reduce_range=quantization_args.reduce_range,
weight_type=weight_type,
op_block_list=quantization_args.op_block_list,
)
def main():
parser = HfArgumentParser((IOArguments, QuantizationArguments))
io_args, quantization_args = parser.parse_args_into_dataclasses()
input_folder = io_args.input_folder
output_folder = io_args.output_folder
quantize(input_folder, output_folder, quantization_args)
if __name__ == "__main__":
main()
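# Illustrative command line (the paths and modes are examples; HfArgumentParser exposes the
# dataclass fields above as `--input_folder`, `--output_folder`, `--modes`, and so on):
#
#   python quantize.py --input_folder models/onnx --output_folder models/onnx_quantized --modes q8 fp16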
| transformers.js/scripts/quantize.py/0 | {
"file_path": "transformers.js/scripts/quantize.py",
"repo_id": "transformers.js",
"token_count": 5609
} |
import { FEATURE_EXTRACTOR_NAME, GITHUB_ISSUE_URL } from '../../utils/constants.js';
import { getModelJSON } from '../../utils/hub.js';
import { FeatureExtractor } from '../../base/feature_extraction_utils.js';
import * as AllFeatureExtractors from '../feature_extractors.js';
export class AutoFeatureExtractor {
/** @type {typeof FeatureExtractor.from_pretrained} */
static async from_pretrained(pretrained_model_name_or_path, options={}) {
const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, true, options);
// Determine feature extractor class
const key = preprocessorConfig.feature_extractor_type;
const feature_extractor_class = AllFeatureExtractors[key];
if (!feature_extractor_class) {
throw new Error(`Unknown feature_extractor_type: '${key}'. Please report this at ${GITHUB_ISSUE_URL}.`);
}
// Instantiate feature extractor
return new feature_extractor_class(preprocessorConfig);
}
}
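// Illustrative usage (the model id is an example; any repo whose preprocessor config
// declares a known `feature_extractor_type` should work):
//
//   const feature_extractor = await AutoFeatureExtractor.from_pretrained('Xenova/wav2vec2-base-960h');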
| transformers.js/src/models/auto/feature_extraction_auto.js/0 | {
"file_path": "transformers.js/src/models/auto/feature_extraction_auto.js",
"repo_id": "transformers.js",
"token_count": 367
} |