#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Once you have trained a policy with our training script (lerobot/scripts/train.py), use this script to push it
to the hub.
Example:
```bash
python lerobot/scripts/push_pretrained.py \
--pretrained_path=outputs/train/act_aloha_sim_transfer_cube_human/checkpoints/last/pretrained_model \
--repo_id=lerobot/act_aloha_sim_transfer_cube_human
```
"""
from dataclasses import dataclass
from pathlib import Path
import draccus
from huggingface_hub import HfApi
@dataclass
class PushPreTrainedConfig:
pretrained_path: Path
repo_id: str
branch: str | None = None
private: bool = False
exist_ok: bool = False
@draccus.wrap()
def main(cfg: PushPreTrainedConfig):
hub_api = HfApi()
hub_api.create_repo(
repo_id=cfg.repo_id,
private=cfg.private,
repo_type="model",
exist_ok=cfg.exist_ok,
)
if cfg.branch:
hub_api.create_branch(
repo_id=cfg.repo_id,
branch=cfg.branch,
repo_type="model",
exist_ok=cfg.exist_ok,
)
hub_api.upload_folder(
repo_id=cfg.repo_id,
folder_path=cfg.pretrained_path,
repo_type="model",
revision=cfg.branch,
)
if __name__ == "__main__":
main()
| lerobot/lerobot/scripts/push_pretrained.py/0 | {
"file_path": "lerobot/lerobot/scripts/push_pretrained.py",
"repo_id": "lerobot",
"token_count": 732
} |
"""Mocked classes and functions from dynamixel_sdk to allow for continuous integration
and testing code logic that requires hardware and devices (e.g. robot arms, cameras)
Warning: These mocked versions are minimalist. They do not exactly mock every behavior
of the original classes and functions (e.g. return types might be None instead of boolean).
"""
# from dynamixel_sdk import COMM_SUCCESS
DEFAULT_BAUDRATE = 9_600
COMM_SUCCESS = 0 # tx or rx packet communication success
def convert_to_bytes(value, bytes):
# TODO(rcadene): remove the need to mock `convert_to_bytes` by implementing the inverse transform
# `convert_bytes_to_value`
del bytes # unused
return value
def get_default_motor_values(motor_index):
return {
# Key (int) are from X_SERIES_CONTROL_TABLE
7: motor_index, # ID
8: DEFAULT_BAUDRATE, # Baud_rate
10: 0, # Drive_Mode
64: 0, # Torque_Enable
        # Set 2560 since the calibration values for the Aloha gripper are between start_pos=2499 and end_pos=3144.
        # For other joints, 2560 will be autocorrected to be in the calibration range.
132: 2560, # Present_Position
}
class PortHandler:
def __init__(self, port):
self.port = port
# factory default baudrate
self.baudrate = DEFAULT_BAUDRATE
def openPort(self): # noqa: N802
return True
def closePort(self): # noqa: N802
pass
def setPacketTimeoutMillis(self, timeout_ms): # noqa: N802
del timeout_ms # unused
def getBaudRate(self): # noqa: N802
return self.baudrate
def setBaudRate(self, baudrate): # noqa: N802
self.baudrate = baudrate
class PacketHandler:
def __init__(self, protocol_version):
del protocol_version # unused
# Use packet_handler.data to communicate across Read and Write
self.data = {}
class GroupSyncRead:
def __init__(self, port_handler, packet_handler, address, bytes):
self.packet_handler = packet_handler
def addParam(self, motor_index): # noqa: N802
# Initialize motor default values
if motor_index not in self.packet_handler.data:
self.packet_handler.data[motor_index] = get_default_motor_values(motor_index)
def txRxPacket(self): # noqa: N802
return COMM_SUCCESS
def getData(self, index, address, bytes): # noqa: N802
return self.packet_handler.data[index][address]
class GroupSyncWrite:
def __init__(self, port_handler, packet_handler, address, bytes):
self.packet_handler = packet_handler
self.address = address
def addParam(self, index, data): # noqa: N802
# Initialize motor default values
if index not in self.packet_handler.data:
self.packet_handler.data[index] = get_default_motor_values(index)
self.changeParam(index, data)
def txPacket(self): # noqa: N802
return COMM_SUCCESS
def changeParam(self, index, data): # noqa: N802
self.packet_handler.data[index][self.address] = data
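# Illustrative usage sketch (not part of the original dynamixel_sdk API surface): the mocks
# round-trip a value from GroupSyncWrite to GroupSyncRead through PacketHandler.data, so
# tests can exercise read/write logic without any hardware attached.
if __name__ == "__main__":
    port = PortHandler("/dev/ttyUSB0")  # the port name is a placeholder
    packet = PacketHandler(protocol_version=2.0)
    writer = GroupSyncWrite(port, packet, address=132, bytes=4)
    writer.addParam(1, 2048)  # sets Present_Position (address 132) for motor index 1
    reader = GroupSyncRead(port, packet, address=132, bytes=4)
    reader.addParam(1)  # motor 1 is already initialized, so the written value is kept
    assert reader.txRxPacket() == COMM_SUCCESS
    assert reader.getData(1, 132, 4) == 2048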
| lerobot/tests/mock_dynamixel_sdk.py/0 | {
"file_path": "lerobot/tests/mock_dynamixel_sdk.py",
"repo_id": "lerobot",
"token_count": 1181
} |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from uuid import uuid4
import numpy as np
import pytest
import torch
from lerobot.common.datasets.online_buffer import OnlineBuffer, compute_sampler_weights
# Some constants for OnlineBuffer tests.
data_key = "data"
data_shape = (2, 3) # just some arbitrary > 1D shape
buffer_capacity = 100
fps = 10
def make_new_buffer(
write_dir: str | None = None, delta_timestamps: dict[str, list[float]] | None = None
) -> tuple[OnlineBuffer, str]:
if write_dir is None:
write_dir = f"/tmp/online_buffer_{uuid4().hex}"
buffer = OnlineBuffer(
write_dir,
data_spec={data_key: {"shape": data_shape, "dtype": np.dtype("float32")}},
buffer_capacity=buffer_capacity,
fps=fps,
delta_timestamps=delta_timestamps,
)
return buffer, write_dir
def make_spoof_data_frames(n_episodes: int, n_frames_per_episode: int) -> dict[str, np.ndarray]:
new_data = {
data_key: np.arange(n_frames_per_episode * n_episodes * np.prod(data_shape)).reshape(-1, *data_shape),
OnlineBuffer.INDEX_KEY: np.arange(n_frames_per_episode * n_episodes),
OnlineBuffer.EPISODE_INDEX_KEY: np.repeat(np.arange(n_episodes), n_frames_per_episode),
OnlineBuffer.FRAME_INDEX_KEY: np.tile(np.arange(n_frames_per_episode), n_episodes),
OnlineBuffer.TIMESTAMP_KEY: np.tile(np.arange(n_frames_per_episode) / fps, n_episodes),
}
return new_data
def test_non_mutate():
"""Checks that the data provided to the add_data method is copied rather than passed by reference.
This means that mutating the data in the buffer does not mutate the original data.
NOTE: If this test fails, it means some of the other tests may be compromised. For example, we can't trust
a success case for `test_write_read`.
"""
buffer, _ = make_new_buffer()
new_data = make_spoof_data_frames(2, buffer_capacity // 4)
new_data_copy = deepcopy(new_data)
buffer.add_data(new_data)
buffer._data[data_key][:] += 1
assert all(np.array_equal(new_data[k], new_data_copy[k]) for k in new_data)
def test_index_error_no_data():
buffer, _ = make_new_buffer()
with pytest.raises(IndexError):
buffer[0]
def test_index_error_with_data():
buffer, _ = make_new_buffer()
n_frames = buffer_capacity // 2
new_data = make_spoof_data_frames(1, n_frames)
buffer.add_data(new_data)
with pytest.raises(IndexError):
buffer[n_frames]
with pytest.raises(IndexError):
buffer[-n_frames - 1]
@pytest.mark.parametrize("do_reload", [False, True])
def test_write_read(do_reload: bool):
"""Checks that data can be added to the buffer and read back.
    If `do_reload` is True, we delete the buffer object and load it back from disk before reading.
"""
buffer, write_dir = make_new_buffer()
n_episodes = 2
n_frames_per_episode = buffer_capacity // 4
new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode)
buffer.add_data(new_data)
if do_reload:
del buffer
buffer, _ = make_new_buffer(write_dir)
assert len(buffer) == n_frames_per_episode * n_episodes
for i, item in enumerate(buffer):
assert all(isinstance(item[k], torch.Tensor) for k in item)
assert np.array_equal(item[data_key].numpy(), new_data[data_key][i])
def test_read_data_key():
    """Tests that data can be added to a buffer and that all data for a specific key can be read back."""
buffer, _ = make_new_buffer()
n_episodes = 2
n_frames_per_episode = buffer_capacity // 4
new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode)
buffer.add_data(new_data)
data_from_buffer = buffer.get_data_by_key(data_key)
assert isinstance(data_from_buffer, torch.Tensor)
assert np.array_equal(data_from_buffer.numpy(), new_data[data_key])
def test_fifo():
"""Checks that if data is added beyond the buffer capacity, we discard the oldest data first."""
buffer, _ = make_new_buffer()
n_frames_per_episode = buffer_capacity // 4
n_episodes = 3
new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode)
buffer.add_data(new_data)
n_more_episodes = 2
# Developer sanity check (in case someone changes the global `buffer_capacity`).
assert (
n_episodes + n_more_episodes
) * n_frames_per_episode > buffer_capacity, "Something went wrong with the test code."
more_new_data = make_spoof_data_frames(n_more_episodes, n_frames_per_episode)
buffer.add_data(more_new_data)
assert len(buffer) == buffer_capacity, "The buffer should be full."
expected_data = {}
for k in new_data:
# Concatenate, left-truncate, then roll, to imitate the cyclical FIFO pattern in OnlineBuffer.
expected_data[k] = np.roll(
np.concatenate([new_data[k], more_new_data[k]])[-buffer_capacity:],
shift=len(new_data[k]) + len(more_new_data[k]) - buffer_capacity,
axis=0,
)
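    # Worked example with illustrative numbers (not the values used in this test): with capacity 4
    # and frames [0..5], concatenating gives [0, 1, 2, 3, 4, 5], left-truncating keeps [2, 3, 4, 5],
    # and rolling by 6 - 4 = 2 yields [4, 5, 2, 3], exactly what the cyclic buffer holds once the
    # two newest frames have overwritten positions 0 and 1.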
for i, item in enumerate(buffer):
assert all(isinstance(item[k], torch.Tensor) for k in item)
assert np.array_equal(item[data_key].numpy(), expected_data[data_key][i])
def test_delta_timestamps_within_tolerance():
"""Check that getting an item with delta_timestamps within tolerance succeeds.
Note: Copied from `test_datasets.py::test_load_previous_and_future_frames_within_tolerance`.
"""
# Sanity check on global fps as we are assuming it is 10 here.
assert fps == 10, "This test assumes fps==10"
buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.2, 0, 0.139]})
new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5)
buffer.add_data(new_data)
buffer.tolerance_s = 0.04
item = buffer[2]
data, is_pad = item["index"], item[f"index{OnlineBuffer.IS_PAD_POSTFIX}"]
assert torch.allclose(data, torch.tensor([0, 2, 3])), "Data does not match expected values"
assert not is_pad.any(), "Unexpected padding detected"
def test_delta_timestamps_outside_tolerance_inside_episode_range():
"""Check that getting an item with delta_timestamps outside of tolerance fails.
We expect it to fail if and only if the requested timestamps are within the episode range.
Note: Copied from
`test_datasets.py::test_load_previous_and_future_frames_outside_tolerance_inside_episode_range`
"""
# Sanity check on global fps as we are assuming it is 10 here.
assert fps == 10, "This test assumes fps==10"
buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.2, 0, 0.141]})
new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5)
buffer.add_data(new_data)
buffer.tolerance_s = 0.04
with pytest.raises(AssertionError):
buffer[2]
def test_delta_timestamps_outside_tolerance_outside_episode_range():
"""Check that copy-padding of timestamps outside of the episode range works.
Note: Copied from
`test_datasets.py::test_load_previous_and_future_frames_outside_tolerance_outside_episode_range`
"""
# Sanity check on global fps as we are assuming it is 10 here.
assert fps == 10, "This test assumes fps==10"
buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.3, -0.24, 0, 0.26, 0.3]})
new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5)
buffer.add_data(new_data)
buffer.tolerance_s = 0.04
item = buffer[2]
data, is_pad = item["index"], item["index_is_pad"]
assert torch.equal(data, torch.tensor([0, 0, 2, 4, 4])), "Data does not match expected values"
assert torch.equal(
is_pad, torch.tensor([True, False, False, True, True])
), "Padding does not match expected values"
# Arbitrarily set small dataset sizes, making sure to have uneven sizes.
@pytest.mark.parametrize("offline_dataset_size", [1, 6])
@pytest.mark.parametrize("online_dataset_size", [0, 4])
@pytest.mark.parametrize("online_sampling_ratio", [0.0, 1.0])
def test_compute_sampler_weights_trivial(
lerobot_dataset_factory,
tmp_path,
offline_dataset_size: int,
online_dataset_size: int,
online_sampling_ratio: float,
):
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=offline_dataset_size)
online_dataset, _ = make_new_buffer()
if online_dataset_size > 0:
online_dataset.add_data(
make_spoof_data_frames(n_episodes=2, n_frames_per_episode=online_dataset_size // 2)
)
weights = compute_sampler_weights(
offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio
)
if offline_dataset_size == 0 or online_dataset_size == 0:
expected_weights = torch.ones(offline_dataset_size + online_dataset_size)
elif online_sampling_ratio == 0:
expected_weights = torch.cat([torch.ones(offline_dataset_size), torch.zeros(online_dataset_size)])
elif online_sampling_ratio == 1:
expected_weights = torch.cat([torch.zeros(offline_dataset_size), torch.ones(online_dataset_size)])
expected_weights /= expected_weights.sum()
assert torch.allclose(weights, expected_weights)
def test_compute_sampler_weights_nontrivial_ratio(lerobot_dataset_factory, tmp_path):
# Arbitrarily set small dataset sizes, making sure to have uneven sizes.
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
online_dataset, _ = make_new_buffer()
online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
online_sampling_ratio = 0.8
weights = compute_sampler_weights(
offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio
)
assert torch.allclose(
weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
)
def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(lerobot_dataset_factory, tmp_path):
# Arbitrarily set small dataset sizes, making sure to have uneven sizes.
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
online_dataset, _ = make_new_buffer()
online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
weights = compute_sampler_weights(
offline_dataset, online_dataset=online_dataset, online_sampling_ratio=0.8, online_drop_n_last_frames=1
)
assert torch.allclose(
weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0])
)
def test_compute_sampler_weights_drop_n_last_frames(lerobot_dataset_factory, tmp_path):
"""Note: test copied from test_sampler."""
offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=2)
online_dataset, _ = make_new_buffer()
online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
weights = compute_sampler_weights(
offline_dataset,
offline_drop_n_last_frames=1,
online_dataset=online_dataset,
online_sampling_ratio=0.5,
online_drop_n_last_frames=1,
)
assert torch.allclose(weights, torch.tensor([0.5, 0, 0.125, 0, 0.125, 0, 0.125, 0, 0.125, 0]))
| lerobot/tests/test_online_buffer.py/0 | {
"file_path": "lerobot/tests/test_online_buffer.py",
"repo_id": "lerobot",
"token_count": 4665
} |
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_GPU
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 8
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
| open-r1/recipes/accelerate_configs/ddp.yaml/0 | {
"file_path": "open-r1/recipes/accelerate_configs/ddp.yaml",
"repo_id": "open-r1",
"token_count": 129
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# torch.compile
In PEFT, [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) works for some but not all features. It won't always work because PEFT is highly dynamic in certain places (loading and switching between multiple adapters, for instance), which can cause trouble for `torch.compile`. In other places, `torch.compile` may work, but it won't be as fast as expected because of graph breaks.
Even if you don't see an error, it doesn't necessarily mean that `torch.compile` worked correctly: it might give you an output, but the output may be incorrect. This guide describes what works with `torch.compile` and what doesn't. For your own testing, we recommend using the latest PyTorch version, as `torch.compile` is constantly being improved.
> [!TIP]
> Unless indicated otherwise, the default `torch.compile` settings were used.
## Training and inference with `torch.compile`
These features **work** with `torch.compile`. Everything listed below was tested with a causal LM:
- Training with `Trainer` from 🤗 transformers
- Training with a custom PyTorch loop
- Inference
- Generation
The following adapters were tested successfully:
- AdaLoRA
- BOFT
- Bone
- IA³
- Layer Norm Tuning
- LoHa
- LoKr
- LoRA
- LoRA + DoRA
- LoRA applied to embedding layers
- OFT
- VeRA
- HRA
## Advanced PEFT features with `torch.compile`
Below are some of the more advanced PEFT features that **work**. They were all tested with LoRA.
- `modules_to_save` (i.e. `config = LoraConfig(..., modules_to_save=...)`)
- Merging adapters (one or multiple)
- Merging multiple adapters into one adapter (i.e. calling `model.add_weighted_adapter(...)`)
- Using PEFT adapters with quantization (bitsandbytes)
- Disabling adapters (i.e. using `with model.disable_adapter()`)
- Unloading (i.e. calling `model.merge_and_unload()`)
- Mixed adapter batches (i.e. calling `model(batch, adapter_names=["__base__", "default", "other", ...])`)
- Inference with multiple adapters (i.e. using `model.add_adapter` or `model.load_adapter` to load more than 1 adapter); for this, only call `torch.compile` _after_ loading all adapters
Generally, we can expect that if a feature works correctly with LoRA and is also supported by other adapter types, it should also work for that adapter type.
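As a minimal sketch (the base model id and LoRA settings below are illustrative placeholders, not taken from the test suite), the general pattern is to finish setting up the PEFT model first and only then call `torch.compile`:

```python
import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder causal LM
peft_model = get_peft_model(base_model, LoraConfig(task_type="CAUSAL_LM"))
# Load or add any further adapters here, before compiling.
compiled_model = torch.compile(peft_model)
```

This mirrors the note above for multiple adapters: load everything first, then compile.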
## Test cases
All the use cases listed above are tested inside of [`peft/tests/test_torch_compile.py`](https://github.com/huggingface/peft/blob/main/tests/test_torch_compile.py). If you want to check in more detail how we tested a certain feature, please go to that file and check the test that corresponds to your use case.
> [!TIP]
> If you have another use case where you know that `torch.compile` does or does not work with PEFT, please contribute by letting us know or by opening a PR to add this use case to the covered test cases.
| peft/docs/source/developer_guides/torch_compile.md/0 | {
"file_path": "peft/docs/source/developer_guides/torch_compile.md",
"repo_id": "peft",
"token_count": 1014
} |
PEFT_TYPE="boft"
BLOCK_NUM=8
BLOCK_SIZE=0
N_BUTTERFLY_FACTOR=1
export DATASET_NAME="oftverse/control-celeba-hq"
export PROJECT_NAME="controlnet_${PEFT_TYPE}"
export RUN_NAME="${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}"
export CONTROLNET_PATH=""
export MODEL_NAME="stabilityai/stable-diffusion-2-1"
# export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="./output/${DATASET_NAME}/${RUN_NAME}"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--resume_from_checkpoint=$RESUME_PATH \
--controlnet_model_name_or_path=$CONTROLNET_PATH \
--output_dir=$OUTPUT_DIR \
--report_to="wandb" \
--dataset_name=$DATASET_NAME \
--resolution=512 \
--learning_rate=1e-5 \
--checkpointing_steps=500 \
--max_train_steps=50000 \
--validation_steps=5000 \
--num_validation_images=12 \
--train_batch_size=4 \
--dataloader_num_workers=2 \
--seed="0" \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--wandb_project_name=$PROJECT_NAME \
--wandb_run_name=$RUN_NAME \
--enable_xformers_memory_efficient_attention \
--use_boft \
--boft_block_num=$BLOCK_NUM \
--boft_block_size=$BLOCK_SIZE \
--boft_n_butterfly_factor=$N_BUTTERFLY_FACTOR \
--boft_dropout=0.1 \
--boft_bias="boft_only" \
| peft/examples/boft_controlnet/train_controlnet.sh/0 | {
"file_path": "peft/examples/boft_controlnet/train_controlnet.sh",
"repo_id": "peft",
"token_count": 557
} |
import argparse
import os
import warnings
from typing import Optional
from huggingface_hub import HfFolder, whoami
from transformers import PretrainedConfig
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "RobertaSeriesModelWithTransformation":
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
return RobertaSeriesModelWithTransformation
else:
raise ValueError(f"{model_class} is not supported.")
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a Dreambooth training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--instance_data_dir",
type=str,
default=None,
required=True,
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--class_data_dir",
type=str,
default=None,
required=False,
help="A folder containing the training data of class images.",
)
parser.add_argument(
"--instance_prompt",
type=str,
default=None,
required=True,
help="The prompt with identifier specifying the instance",
)
parser.add_argument(
"--class_prompt",
type=str,
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
parser.add_argument(
"--with_prior_preservation",
default=False,
action="store_true",
help="Flag to add prior preservation loss.",
)
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
parser.add_argument(
"--num_class_images",
type=int,
default=100,
help=(
"Minimal class images for prior preservation loss. If there are not enough images already present in"
" class_data_dir, additional images will be sampled with class_prompt."
),
)
parser.add_argument(
"--validation_prompt",
nargs="+",
help="A prompt that is used during validation to verify that the model is learning.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_prompt`.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=500,
help=(
"Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
parser.add_argument(
"--set_grads_to_none",
action="store_true",
        help=(
            "Save more memory by setting grads to None instead of zero. Be aware that this changes certain"
" behaviors, so disable this argument if it causes any problems. More info:"
" https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
),
)
# boft args
parser.add_argument("--use_boft", action="store_true", help="Whether to use BOFT for parameter efficient tuning")
parser.add_argument("--boft_block_num", type=int, default=4, help="The number of BOFT blocks")
parser.add_argument("--boft_block_size", type=int, default=0, help="The size of BOFT blocks")
parser.add_argument("--boft_n_butterfly_factor", type=int, default=2, help="The number of butterfly factors")
parser.add_argument("--boft_dropout", type=float, default=0.1, help="BOFT dropout, only used if use_boft is True")
parser.add_argument(
"--boft_bias",
type=str,
default="none",
help="Bias type for BOFT. Can be 'none', 'all' or 'boft_only', only used if use_boft is True",
)
parser.add_argument(
"--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader."
)
parser.add_argument(
"--no_tracemalloc",
default=False,
action="store_true",
help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.",
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-6,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="wandb",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--wandb_key",
type=str,
default=None,
help=("If report to option is set to wandb, api-key for wandb used for login to wandb "),
)
parser.add_argument(
"--wandb_project_name",
type=str,
default=None,
help=("If report to option is set to wandb, project name in wandb for log tracking "),
)
parser.add_argument(
"--wandb_run_name",
type=str,
default=None,
        help=("If report to option is set to wandb, run name in wandb for log tracking "),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--prior_generation_precision",
type=str,
default=None,
choices=["no", "fp32", "fp16", "bf16"],
        help=(
            "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
# Sanity checks
# if args.dataset_name is None and args.train_data_dir is None:
# raise ValueError("Need either a dataset name or a training folder.")
if args.with_prior_preservation:
if args.class_data_dir is None:
raise ValueError("You must specify a data directory for class images.")
if args.class_prompt is None:
raise ValueError("You must specify prompt for class images.")
else:
# logger is not available yet
if args.class_data_dir is not None:
warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
if args.class_prompt is not None:
warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
return args
| peft/examples/boft_dreambooth/utils/args_loader.py/0 | {
"file_path": "peft/examples/boft_dreambooth/utils/args_loader.py",
"repo_id": "peft",
"token_count": 5745
} |
<jupyter_start><jupyter_code>from transformers import AutoModelForSeq2SeqLM
import peft
from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, IA3Config, TaskType
import torch
from datasets import load_dataset
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from transformers import AutoTokenizer
from torch.utils.data import DataLoader
from transformers import default_data_collator, get_linear_schedule_with_warmup
from tqdm import tqdm
from datasets import load_dataset
device = "cuda"
model_name_or_path = "bigscience/mt0-large"
tokenizer_name_or_path = "bigscience/mt0-large"
checkpoint_name = "financial_sentiment_analysis_ia3_v1.pt"
text_column = "sentence"
label_column = "text_label"
max_length = 128
lr = 8e-3
num_epochs = 3
batch_size = 8
import importlib
importlib.reload(peft)
# creating model
peft_config = IA3Config(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, feedforward_modules=[])
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
model
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model
# loading dataset
dataset = load_dataset("financial_phrasebank", "sentences_allagree")
dataset = dataset["train"].train_test_split(test_size=0.1)
dataset["validation"] = dataset["test"]
del dataset["test"]
classes = dataset["train"].features["label"].names
dataset = dataset.map(
lambda x: {"text_label": [classes[label] for label in x["label"]]},
batched=True,
num_proc=1,
)
dataset["train"][0]
# data preprocessing
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[label_column]
model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt")
labels = labels["input_ids"]
labels[labels == tokenizer.pad_token_id] = -100
model_inputs["labels"] = labels
return model_inputs
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
# optimizer and lr scheduler
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=(len(train_dataloader) * num_epochs),
)
# training and evaluation
model = model.to(device)
for epoch in range(num_epochs):
model.train()
total_loss = 0
for step, batch in enumerate(tqdm(train_dataloader)):
batch = {k: v.to(device) for k, v in batch.items()}
outputs = model(**batch)
loss = outputs.loss
total_loss += loss.detach().float()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
eval_loss = 0
eval_preds = []
for step, batch in enumerate(tqdm(eval_dataloader)):
batch = {k: v.to(device) for k, v in batch.items()}
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
eval_loss += loss.detach().float()
eval_preds.extend(
tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
)
eval_epoch_loss = eval_loss / len(eval_dataloader)
eval_ppl = torch.exp(eval_epoch_loss)
train_epoch_loss = total_loss / len(train_dataloader)
train_ppl = torch.exp(train_epoch_loss)
print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
# print accuracy
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
if pred.strip() == true.strip():
correct += 1
total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")
# saving model
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
model.save_pretrained(peft_model_id)
ckpt = f"{peft_model_id}/adapter_model.bin"
!du -h $ckpt
from peft import PeftModel, PeftConfig
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
model.eval()
i = 13
inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt")
print(dataset["validation"][text_column][i])
print(inputs)
with torch.no_grad():
outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
print(outputs)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>25 November 2010 - Finnish paints and coatings company Tikkurila Oyj ( HEL : TIK1V ) said today that Finnish state-owned investment company Solidium Oy sold its 14.7 % stake in the company for a total of EUR98m .
{'input_ids': tensor([[ 877, 3277, 1068, 259, 264, 515, 143136, 42068, 263,
305, 259, 101264, 263, 5835, 22538, 4496, 2697, 20860,
385, 274, 76347, 259, 267, 259, 93686, 353, 561,
259, 271, 2426, 7883, 533, 515, 143136, 6509, 264,
45815, 37624, 5835, 35133, 16558, 20860, 22026, 2476, 5006,
487, 1448, 259, 96189, 281, 287, 5835, 332, 259,
262, 2725, 304, 2687, 5577, 282, 259, 260, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1,[...] | peft/examples/conditional_generation/peft_ia3_seq2seq.ipynb/0 | {
"file_path": "peft/examples/conditional_generation/peft_ia3_seq2seq.ipynb",
"repo_id": "peft",
"token_count": 2685
} |
<jupyter_start><jupyter_text>Fine-tuning [Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the [timdettmers/openassistant-guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) dataset using QDoRA (quantized LoRA with `use_dora=True`) on a free Colab T4 GPU.<jupyter_code># Install the libraries
!pip install -q -U bitsandbytes
!pip install -q -U git+https://github.com/huggingface/transformers.git
!pip install -q -U git+https://github.com/huggingface/peft.git
!pip install -q -U git+https://github.com/huggingface/accelerate.git
!pip install -q datasets
# Required when training models/data that are gated on HuggingFace, and required for pushing models to HuggingFace
from huggingface_hub import notebook_login
notebook_login()<jupyter_output><empty_output><jupyter_text>Loading the model and its tokenizer in a quantized setup!<jupyter_code># setting up the config for 4-bit quantization (QLoRA)
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
model_id = "meta-llama/Meta-Llama-3-8B"
bnb_config = BitsAndBytesConfig(
load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"": 0})
# print(model)<jupyter_output><empty_output><jupyter_text>Prepare model for PEFT fine-tuning<jupyter_code>from peft import prepare_model_for_kbit_training
model.gradient_checkpointing_enable()
model = prepare_model_for_kbit_training(model)
def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
    )<jupyter_output><empty_output><jupyter_text>Setup `LoraConfig`. To use DoRA, we set `use_dora=True`.<jupyter_code>from peft import LoraConfig, get_peft_model
config = LoraConfig(
use_dora=True,
r=8,
lora_alpha=32,
target_modules=[
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
], # parameters specific to llama
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
print_trainable_parameters(model)<jupyter_output>trainable params: 22347776 || all params: 4562948096 || trainable%: 0.48976616717579247<jupyter_text>Step 2) Fine-tuning process 💥<jupyter_code># Load the dataset from HF
from datasets import load_dataset
data = load_dataset("timdettmers/openassistant-guanaco")
data = data.map(lambda samples: tokenizer(samples["text"]), batched=True)<jupyter_output><empty_output><jupyter_text>Training For the sake of this demo, we run it for only 10 steps, just to showcase how to use this integration with existing tools in the HF ecosystem.<jupyter_code>import transformers
tokenizer.pad_token = tokenizer.eos_token
trainer = transformers.Trainer(
model=model,
train_dataset=data["train"],
args=transformers.TrainingArguments(
per_device_train_batch_size=1,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=10,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir="path/to/your/HF/repo", # change it to your desired repo!
optim="paged_adamw_8bit",
),
data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False # silence the warnings. Please re-enable for inference!
trainer.train()<jupyter_output>max_steps is given, it will override any value given in num_train_epochs
/usr/local/lib/python3.10/dist-packages/torch/utils/checkpoint.py:464: UserWarning: torch.utils.checkpoint: the use_reentrant parameter should be passed explicitly. In version 2.4 we will raise an exception if use_reentrant is not passed. use_reentrant=False is recommended, but if you need to preserve the current default behavior, you can pass use_reentrant=True. Refer to docs for more details on the differences between the two variants.
warnings.warn(<jupyter_text>Usage Example<jupyter_code>model.config.use_cache = True
model.eval();
from transformers import GenerationConfig
max_new_tokens = 120
top_p = 0.9
temperature = 0.7
user_question = "What is the purpose of quantization in LLMs?"
prompt = (
"A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions. "
"### Human: {user_question}"
"### Assistant: "
)
def generate(model, user_question, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature):
inputs = tokenizer(prompt.format(user_question=user_question), return_tensors="pt").to("cuda")
outputs = model.generate(
**inputs,
generation_config=GenerationConfig(
do_sample=True,
max_new_tokens=max_new_tokens,
top_p=top_p,
temperature=temperature,
),
)
text = tokenizer.decode(outputs[0], skip_special_tokens=True)
# print(text)
return text
generate(model, user_question)
# trainer.push_to_hub()<jupyter_output><empty_output> | peft/examples/dora_finetuning/QDoRA_finetuning.ipynb/0 | {
"file_path": "peft/examples/dora_finetuning/QDoRA_finetuning.ipynb",
"repo_id": "peft",
"token_count": 2118
} |
<jupyter_start><jupyter_text>Initializing weights with LoftQ by replacing LoRA weights in-place This notebook shows how to apply [LoftQ](https://arxiv.org/abs/2310.08659) initialization on our QLoRA model. In short, the idea behind LoftQ is the following. When we use QLoRA, i.e. we quantize the base model with bitsandbytes to save memory, and then train LoRA weights on top of this base model, we expect a certain performance gap. This is partly due to the fact that quantization is only an approximation of the "real" weights and thus introduces a quantization error. By default, LoRA weights are initialized such that they are a no-op at the start of the training. However, we can instead initialize them so that they minimize the quantization error. This is the idea behind LoftQ. Note that this only influences the initialization of the model. Everything that follows stays the same as always. Imports<jupyter_code>import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import get_peft_model, LoraConfig, replace_lora_weights_loftq<jupyter_output><empty_output><jupyter_text>Functions<jupyter_code>def get_mae(x, y):
return (x - y).abs().mean()
def get_mse(x, y):
return torch.pow(x - y, 2).mean()
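# Conceptual sketch of the LoftQ idea described above (an illustrative assumption, not the exact
# algorithm implemented in peft's replace_lora_weights_loftq): choose low-rank factors A and B so
# that quantize(W) + B @ A approximates the original weight W, e.g. via a truncated SVD of the residual.
def loftq_like_init(weight, quantize_fn, rank):
    residual = weight - quantize_fn(weight)  # quantization error the LoRA weights should absorb
    U, S, Vh = torch.linalg.svd(residual, full_matrices=False)
    lora_B = U[:, :rank] * S[:rank]  # shape: (out_features, rank)
    lora_A = Vh[:rank]  # shape: (rank, in_features)
    return lora_A, lora_B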
def error_report(x, y):
mae = get_mae(x, y)
mse = get_mse(x, y)
print(
f"Mean absolute error: {mae:>8.5f}\n"
f"Mean squared error: {mse:>8.5f}"
)<jupyter_output><empty_output><jupyter_text>Base model First, let's load a base model and calculate some logits. These logits are the baseline, i.e. we try to match their values as best as possible. We only need these logits for demonstration purposes. In practice, it is not necessary to load the non-quantized weights to apply LoftQ initialization.**Note**: We have to choose a model with a `model.safetensors` file. As PyTorch checkpoints (pickle) cannot be loaded lazily, we have to use [safetensors](https://huggingface.co/docs/safetensors/index). If those don't exist for your model, save the pretrained model as a safetensors file using `safe_pretrained` and pass the model path to `replace_lora_weights_loftq`.<jupyter_code>model_id = "bigscience/bloomz-560m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
s = """Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
inputs = tokenizer(s.splitlines(), return_tensors="pt", padding=True)<jupyter_output><empty_output><jupyter_text>Our baseline logits:<jupyter_code>logits_base = model(**inputs).logits<jupyter_output><empty_output><jupyter_text>Normal LoRA model Now we load the model quantized with bitsandbytes. For now, only 4bit is supported.<jupyter_code>bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)<jupyter_output>`low_cpu_mem_usage` was None, now set to True since model is quantized.<jupyter_text>Next we create a LoRA model using PEFT and compute the logits of that model.<jupyter_code>lora_config = LoraConfig(task_type="CAUSAL_LM", target_modules="all-linear")
peft_model = get_peft_model(model, lora_config)
logits_lora = peft_model(**inputs).logits<jupyter_output>.../bitsandbytes/nn/modules.py:391: UserWarning: Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.
warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.')<jupyter_text>Let's check the influence of the quantization error on our logits:<jupyter_code>error_report(logits_base, logits_lora)<jupyter_output>Mean absolute error: 3.61113
Mean squared error: 36.53259<jupyter_text>LoftQ Next, let's use LoftQ initialization and see if it helps reduce the error.<jupyter_code>replace_lora_weights_loftq(peft_model)
logits_loftq = peft_model(**inputs).logits
error_report(logits_base, logits_loftq)<jupyter_output>Mean absolute error: 3.24111
Mean squared error: 31.13725<jupyter_text>We can see that LoftQ initialization helped a little bit, but the difference is not huge. LoftQ with callback To help with this, let's write a small callback function and pass it to `replace_lora_weights_loftq`. What this function does is that each time one weight is being replaced with LoftQ-initialized weights, we test whether the quantization error is actually reduced. If it is not, we roll back the replacement. This way, we keep only those replacements that improve the results.<jupyter_code># Since PEFT has modified the base model, we should reload it
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
peft_model = get_peft_model(model, lora_config)
current_mse = float("inf")
def my_callback(model, module_name):
    """Callback that keeps the LoftQ weight replacement only if the MSE is lower than the current best one."""
global current_mse
logits = model(**inputs).logits
mse = get_mse(logits_base, logits)
if mse < current_mse:
current_mse = mse
print(f"MSE improved for module {module_name}")
return True
print(f"MSE did not improve for module {module_name}")
return False
replace_lora_weights_loftq(peft_model, callback=my_callback)
logits_loftq_callback = peft_model(**inputs).logits
error_report(logits_base, logits_loftq_callback)<jupyter_output>Mean absolute error: 1.79576
Mean squared error: 8.47075<jupyter_text>We can see that applying LoftQ with the help of the callback reduced the error quite significantly. Applying LoftQ multiple times It is possible to run `replace_lora_weights_loftq` multiple times on the same model when using the callback.<jupyter_code>replace_lora_weights_loftq(peft_model, callback=my_callback)
logits_loftq_callback_twice = peft_model(**inputs).logits
error_report(logits_base, logits_loftq_callback_twice)<jupyter_output>Mean absolute error: 1.76357
Mean squared error: 8.33938 | peft/examples/loftq_finetuning/LoftQ_weight_replacement.ipynb/0 | {
"file_path": "peft/examples/loftq_finetuning/LoftQ_weight_replacement.ipynb",
"repo_id": "peft",
"token_count": 2207
} |
import argparse
import gc
import hashlib
import itertools
import logging
import math
import os
import threading
import warnings
from contextlib import nullcontext
from pathlib import Path
import datasets
import diffusers
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import HfApi
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from peft import get_peft_model
from peft.tuners.oft.config import OFTConfig
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.10.0.dev0")
logger = get_logger(__name__)
UNET_TARGET_MODULES = ["to_q", "to_v", "query", "value"] # , "ff.net.0.proj"]
TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"]
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "RobertaSeriesModelWithTransformation":
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
return RobertaSeriesModelWithTransformation
else:
raise ValueError(f"{model_class} is not supported.")
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--instance_data_dir",
type=str,
default=None,
required=True,
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--class_data_dir",
type=str,
default=None,
required=False,
help="A folder containing the training data of class images.",
)
parser.add_argument(
"--instance_prompt",
type=str,
default=None,
required=True,
help="The prompt with identifier specifying the instance",
)
parser.add_argument(
"--class_prompt",
type=str,
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
parser.add_argument(
"--with_prior_preservation",
default=False,
action="store_true",
help="Flag to add prior preservation loss.",
)
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
parser.add_argument(
"--num_class_images",
type=int,
default=100,
help=(
"Minimal class images for prior preservation loss. If there are not enough images already present in"
" class_data_dir, additional images will be sampled with class_prompt."
),
)
parser.add_argument(
"--validation_prompt",
type=str,
default=None,
help="A prompt that is used during validation to verify that the model is learning.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_prompt`.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=100,
help=(
"Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
# oft args
parser.add_argument("--use_oft", action="store_true", help="Whether to use OFT for parameter efficient tuning")
parser.add_argument("--oft_r", type=int, default=8, help="OFT rank, only used if use_oft is True")
parser.add_argument("--oft_alpha", type=int, default=32, help="OFT alpha, only used if use_oft is True")
parser.add_argument("--oft_dropout", type=float, default=0.0, help="OFT dropout, only used if use_oft is True")
parser.add_argument(
"--oft_use_coft", action="store_true", help="Using constrained OFT, only used if use_oft is True"
)
parser.add_argument(
"--oft_eps",
type=float,
default=0.0,
help="The control strength of COFT. Only has an effect if `oft_use_coft` is set to True.",
)
parser.add_argument(
"--oft_text_encoder_r",
type=int,
default=8,
help="OFT rank for text encoder, only used if `use_oft` and `train_text_encoder` are True",
)
parser.add_argument(
"--oft_text_encoder_alpha",
type=int,
default=32,
help="OFT alpha for text encoder, only used if `use_oft` and `train_text_encoder` are True",
)
parser.add_argument(
"--oft_text_encoder_dropout",
type=float,
default=0.0,
help="OFT dropout for text encoder, only used if `use_oft` and `train_text_encoder` are True",
)
parser.add_argument(
"--oft_text_encoder_use_coft",
action="store_true",
help="Using constrained OFT on the text encoder, only used if use_oft is True",
)
parser.add_argument(
"--oft_text_encoder_eps",
type=float,
default=0.0,
help="The control strength of COFT on the text encoder. Only has an effect if `oft_text_encoder_use_coft` is set to True.",
)
parser.add_argument(
"--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader."
)
parser.add_argument(
"--no_tracemalloc",
default=False,
action="store_true",
help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.",
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-6,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--wandb_key",
type=str,
default=None,
help=("If report to option is set to wandb, api-key for wandb used for login to wandb "),
)
parser.add_argument(
"--wandb_project_name",
type=str,
default=None,
help=("If report to option is set to wandb, project name in wandb for log tracking "),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--prior_generation_precision",
type=str,
default=None,
choices=["no", "fp32", "fp16", "bf16"],
help=(
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.with_prior_preservation:
if args.class_data_dir is None:
raise ValueError("You must specify a data directory for class images.")
if args.class_prompt is None:
raise ValueError("You must specify prompt for class images.")
else:
# logger is not available yet
if args.class_data_dir is not None:
warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
if args.class_prompt is not None:
warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
return args
# Converting Bytes to Megabytes
def b2mb(x):
return int(x / 2**20)
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
def __enter__(self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
self.process = psutil.Process()
self.cpu_begin = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
return self
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_peak = -1
while True:
self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def __exit__(self, *exc):
self.peak_monitoring = False
gc.collect()
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
self.used = b2mb(self.end - self.begin)
self.peaked = b2mb(self.peak - self.begin)
self.cpu_end = self.cpu_mem_used()
self.cpu_used = b2mb(self.cpu_end - self.cpu_begin)
self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
class DreamBoothDataset(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and tokenizes the prompts.
"""
def __init__(
self,
instance_data_root,
instance_prompt,
tokenizer,
class_data_root=None,
class_prompt=None,
size=512,
center_crop=False,
):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.instance_data_root = Path(instance_data_root)
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self.instance_prompt = instance_prompt
self._length = self.num_instance_images
if class_data_root is not None:
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.class_prompt = class_prompt
else:
self.class_data_root = None
self.image_transforms = transforms.Compose(
[
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
example["instance_images"] = self.image_transforms(instance_image)
example["instance_prompt_ids"] = self.tokenizer(
self.instance_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
example["class_prompt_ids"] = self.tokenizer(
self.class_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
return example
def collate_fn(examples, with_prior_preservation=False):
input_ids = [example["instance_prompt_ids"] for example in examples]
pixel_values = [example["instance_images"] for example in examples]
# Concat class and instance examples for prior preservation.
# We do this to avoid doing two forward passes.
if with_prior_preservation:
input_ids += [example["class_prompt_ids"] for example in examples]
pixel_values += [example["class_images"] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
input_ids = torch.cat(input_ids, dim=0)
batch = {
"input_ids": input_ids,
"pixel_values": pixel_values,
}
return batch
class PromptDataset(Dataset):
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example["prompt"] = self.prompt
example["index"] = index
return example
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_dir=logging_dir,
)
if args.report_to == "wandb":
import wandb
wandb.login(key=args.wandb_key)
wandb.init(project=args.wandb_project_name)
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
raise ValueError(
"Gradient accumulation is not supported when training the text encoder in distributed training. "
"Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Generate class images if prior preservation is enabled.
if args.with_prior_preservation:
class_images_dir = Path(args.class_data_dir)
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < args.num_class_images:
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
if args.prior_generation_precision == "fp32":
torch_dtype = torch.float32
elif args.prior_generation_precision == "fp16":
torch_dtype = torch.float16
elif args.prior_generation_precision == "bf16":
torch_dtype = torch.bfloat16
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
torch_dtype=torch_dtype,
safety_checker=None,
revision=args.revision,
)
pipeline.set_progress_bar_config(disable=True)
num_new_images = args.num_class_images - cur_class_images
logger.info(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
):
images = pipeline(example["prompt"]).images
for i, image in enumerate(images):
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
api = HfApi(token=args.hub_token)
# Create repo (repo_name from args or inferred)
repo_name = args.hub_model_id
if repo_name is None:
repo_name = Path(args.output_dir).absolute().name
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load the tokenizer
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
# import correct text encoder class
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
# Load scheduler and models
noise_scheduler = DDPMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
num_train_timesteps=1000,
) # DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = text_encoder_cls.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
)
if args.use_oft:
config = OFTConfig(
r=args.oft_r,
alpha=args.oft_alpha,
target_modules=UNET_TARGET_MODULES,
module_dropout=args.oft_dropout,
init_weights=True,
coft=args.oft_use_coft,
eps=args.oft_eps,
)
unet = get_peft_model(unet, config)
unet.print_trainable_parameters()
print(unet)
vae.requires_grad_(False)
if not args.train_text_encoder:
text_encoder.requires_grad_(False)
elif args.train_text_encoder and args.use_oft:
config = OFTConfig(
r=args.oft_text_encoder_r,
alpha=args.oft_text_encoder_alpha,
target_modules=TEXT_ENCODER_TARGET_MODULES,
module_dropout=args.oft_text_encoder_dropout,
init_weights=True,
coft=args.oft_text_encoder_use_coft,
eps=args.oft_text_encoder_eps,
)
text_encoder = get_peft_model(text_encoder, config)
text_encoder.print_trainable_parameters()
print(text_encoder)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
        # gradient checkpointing on the text encoder fails when using OFT, so only enable it when OFT is not used
if args.train_text_encoder and not args.use_oft:
text_encoder.gradient_checkpointing_enable()
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
    # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Optimizer creation
params_to_optimize = (
itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
)
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
train_dataset = DreamBoothDataset(
instance_data_root=args.instance_data_dir,
instance_prompt=args.instance_prompt,
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
class_prompt=args.class_prompt,
tokenizer=tokenizer,
size=args.resolution,
center_crop=args.center_crop,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
num_workers=args.num_dataloader_workers,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
num_cycles=args.lr_num_cycles,
power=args.lr_power,
)
# Prepare everything with our `accelerator`.
if args.train_text_encoder:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, optimizer, train_dataloader, lr_scheduler
)
# For mixed precision training we cast the text_encoder and vae weights to half-precision
    # as these models are only used for inference; keeping the weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move vae and text_encoder to device and cast to weight_dtype
vae.to(accelerator.device, dtype=weight_dtype)
if not args.train_text_encoder:
text_encoder.to(accelerator.device, dtype=weight_dtype)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("dreambooth", config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
            # Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1]
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
resume_global_step = global_step * args.gradient_accumulation_steps
first_epoch = resume_global_step // num_update_steps_per_epoch
resume_step = resume_global_step % num_update_steps_per_epoch
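        # global_step counts optimizer updates; multiplying by gradient_accumulation_steps recovers the
        # number of dataloader steps already consumed, from which the epoch and in-epoch step to skip to
        # are derived.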
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
for epoch in range(first_epoch, args.num_train_epochs):
unet.train()
if args.train_text_encoder:
text_encoder.train()
with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc:
for step, batch in enumerate(train_dataloader):
# Skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
if step % args.gradient_accumulation_steps == 0:
progress_bar.update(1)
if args.report_to == "wandb":
accelerator.print(progress_bar)
continue
with accelerator.accumulate(unet):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
if args.with_prior_preservation:
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)
# Compute instance loss
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
# Compute prior loss
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + args.prior_loss_weight * prior_loss
else:
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = (
itertools.chain(unet.parameters(), text_encoder.parameters())
if args.train_text_encoder
else unet.parameters()
)
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
if args.report_to == "wandb":
accelerator.print(progress_bar)
global_step += 1
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if (
args.validation_prompt is not None
and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0
):
logger.info(
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
f" {args.validation_prompt}."
)
# create pipeline
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
safety_checker=None,
revision=args.revision,
)
# set `keep_fp32_wrapper` to True because we do not want to remove
# mixed precision hooks while we are still training
pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True)
pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
if args.seed is not None:
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
else:
generator = None
images = []
for _ in range(args.num_validation_images):
image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
images.append(image)
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
import wandb
tracker.log(
{
"validation": [
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
for i, image in enumerate(images)
]
}
)
del pipeline
torch.cuda.empty_cache()
if global_step >= args.max_train_steps:
break
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
if not args.no_tracemalloc:
accelerator.print(f"GPU Memory before entering the train : {b2mb(tracemalloc.begin)}")
accelerator.print(f"GPU Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
accelerator.print(f"GPU Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
accelerator.print(
f"GPU Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
)
accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}")
accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}")
accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}")
accelerator.print(
f"CPU Total Peak Memory consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}"
)
    # Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
if args.use_oft:
            unwrapped_unet = accelerator.unwrap_model(unet)
            unwrapped_unet.save_pretrained(
                os.path.join(args.output_dir, "unet"), state_dict=accelerator.get_state_dict(unet)
            )
            if args.train_text_encoder:
                unwrapped_text_encoder = accelerator.unwrap_model(text_encoder)
                unwrapped_text_encoder.save_pretrained(
                    os.path.join(args.output_dir, "text_encoder"),
                    state_dict=accelerator.get_state_dict(text_encoder),
                )
else:
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
revision=args.revision,
)
pipeline.save_pretrained(args.output_dir)
if args.push_to_hub:
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
run_as_future=True,
)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| peft/examples/oft_dreambooth/train_dreambooth.py/0 | {
"file_path": "peft/examples/oft_dreambooth/train_dreambooth.py",
"repo_id": "peft",
"token_count": 20006
} |
<jupyter_start><jupyter_text>Using VB-LoRA for sequence classification In this example, we fine-tune RoBERTa on a sequence classification task using VB-LoRA. This notebook is adapted from `examples/sequence_classification/VeRA.ipynb`. Imports<jupyter_code>import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from peft import (
get_peft_model,
VBLoRAConfig,
PeftType,
)
import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup
from tqdm import tqdm<jupyter_output><empty_output><jupyter_text>Parameters<jupyter_code>batch_size = 32
model_name_or_path = "roberta-large"
task = "mrpc"
peft_type = PeftType.VBLORA
device = "cuda"
num_epochs = 20
rank = 4
max_length = 128
num_vectors = 90
vector_length = 256
torch.manual_seed(0)
peft_config = VBLoRAConfig(
task_type="SEQ_CLS",
r=rank,
topk=2,
target_modules=['key', 'value', 'query', 'output.dense', 'intermediate.dense'],
num_vectors=num_vectors,
vector_length=vector_length,
save_only_topk_weights=True, # Set to True to reduce storage space. Note that the saved parameters cannot be used to resume training from checkpoints.
vblora_dropout=0.,
)
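# VB-LoRA composes all LoRA update matrices from a single shared bank of `num_vectors` vectors of
# length `vector_length`; each sub-vector is an admixture of the top-k (here top-2) bank vectors
# selected through trainable logits, which is what keeps the saved adapter extremely small.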
head_lr = 4e-3
vector_bank_lr = 1e-3
logits_lr = 1e-2<jupyter_output><empty_output><jupyter_text>Loading data<jupyter_code>if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
padding_side = "left"
else:
padding_side = "right"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)
def tokenize_function(examples):
    # truncate to `max_length` tokens; padding to the longest sequence in each batch is handled later by collate_fn
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=max_length)
return outputs
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels', which is the column name that models from the
# transformers library expect
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)<jupyter_output><empty_output><jupyter_text>Preparing the VB-LoRA model<jupyter_code>model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model.print_savable_parameters()
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
vector_bank_parameters = [name for name, _ in model.named_parameters() if "vector_bank" in name]
logits_parameters = [name for name, _ in model.named_parameters() if "logits" in name]
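# Four optimizer groups: weight-decayed head parameters, non-decayed head parameters, the shared
# vector bank with its own learning rate, and the top-k logits with their own learning rate.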
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if n in decay_parameters and \
n not in logits_parameters and n not in vector_bank_parameters],
"weight_decay": 0.1,
"lr": head_lr,
},
{
"params": [p for n, p in model.named_parameters() if n not in decay_parameters and \
n not in logits_parameters and n not in vector_bank_parameters],
"weight_decay": 0.0,
"lr": head_lr,
},
{
"params": [p for n, p in model.named_parameters() if n in vector_bank_parameters],
"lr": vector_bank_lr,
"weight_decay": 0.0,
},
{
"params": [p for n, p in model.named_parameters() if n in logits_parameters],
"lr": logits_lr,
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),
num_training_steps=(len(train_dataloader) * num_epochs),
)<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>model.to(device)
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(tqdm(train_dataloader)):
batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
batch.to(device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = predictions, batch["labels"]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/115 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
100%|██████████| 115/115 [00:34<00:00, 3.33it/s]
100%|██████████| 13/13 [00:01<00:00, 7.84it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>account_id = ... # your Hugging Face Hub account ID
model.push_to_hub(f"{account_id}/roberta-large-peft-vblora")<jupyter_output><empty_output><jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer
peft_model_id = f"{account_id}/roberta-large-peft-vblora"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
# Load the model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)
inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
batch.to(device)
with torch.no_grad():
outputs = inference_model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = predictions, batch["labels"]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
print(eval_metric)<jupyter_output>0%| | 0/13 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
100%|██████████| 13/13 [00:01<00:00, 7.81it/s] | peft/examples/sequence_classification/VBLoRA.ipynb/0 | {
"file_path": "peft/examples/sequence_classification/VBLoRA.ipynb",
"repo_id": "peft",
"token_count": 2869
} |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://botorch.org/api/_modules/botorch/utils/torch.html
# TODO: To be removed once (if) https://github.com/pytorch/pytorch/pull/37385 lands
from __future__ import annotations
import collections
from collections import OrderedDict
import torch
from torch.nn import Module
class BufferDict(Module):
r"""
Holds buffers in a dictionary.
    BufferDict can be indexed like a regular Python dictionary, but the buffers it contains are properly registered,
    and will be visible to all Module methods. `BufferDict` is an **ordered** dictionary that respects
    * the order of insertion, and
    * in `BufferDict.update`, the order of the merged `OrderedDict` or another `BufferDict` (the
    argument to `BufferDict.update`).
    Note that `BufferDict.update` with other unordered mapping types (e.g., Python's plain `dict`) does not
    preserve the order of the merged mapping.
Args:
buffers (iterable, optional):
a mapping (dictionary) of (string : `torch.Tensor`) or an iterable of key-value pairs of type (string,
`torch.Tensor`)
```python
class MyModule(nn.Module):
def __init__(self):
super().__init__()
            self.buffers = BufferDict({"left": torch.randn(5, 10), "right": torch.randn(5, 10)})
def forward(self, x, choice):
x = self.buffers[choice].mm(x)
return x
```
"""
def __init__(self, buffers=None, persistent: bool = False):
r"""
Args:
buffers (`dict`):
A mapping (dictionary) from string to `torch.Tensor`, or an iterable of key-value pairs of type
(string, `torch.Tensor`).
"""
super().__init__()
if buffers is not None:
self.update(buffers)
self.persistent = persistent
def __getitem__(self, key):
return self._buffers[key]
def __setitem__(self, key, buffer):
self.register_buffer(key, buffer, persistent=self.persistent)
def __delitem__(self, key):
del self._buffers[key]
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.keys())
def __contains__(self, key):
return key in self._buffers
def clear(self):
"""Remove all items from the BufferDict."""
self._buffers.clear()
def pop(self, key):
r"""Remove key from the BufferDict and return its buffer.
Args:
key (`str`):
Key to pop from the BufferDict
"""
v = self[key]
del self[key]
return v
def keys(self):
r"""Return an iterable of the BufferDict keys."""
return self._buffers.keys()
def items(self):
r"""Return an iterable of the BufferDict key/value pairs."""
return self._buffers.items()
def values(self):
r"""Return an iterable of the BufferDict values."""
return self._buffers.values()
def update(self, buffers):
r"""
Update the `torch.nn.BufferDict` with the key-value pairs from a mapping or an iterable, overwriting existing
keys.
Note:
If `buffers` is an `OrderedDict`, a `torch.nn.BufferDict`, or an iterable of key-value pairs, the order of
new elements in it is preserved.
Args:
buffers (iterable):
a mapping (dictionary) from string to `torch.Tensor`, or an iterable of key-value pairs of type
(string, `torch.Tensor`).
"""
if not isinstance(buffers, collections.abc.Iterable):
raise TypeError(
"BuffersDict.update should be called with an "
"iterable of key/value pairs, but got " + type(buffers).__name__
)
if isinstance(buffers, collections.abc.Mapping):
if isinstance(buffers, (OrderedDict, BufferDict)):
for key, buffer in buffers.items():
self[key] = buffer
else:
for key, buffer in sorted(buffers.items()):
self[key] = buffer
else:
for j, p in enumerate(buffers):
if not isinstance(p, collections.abc.Iterable):
raise TypeError(
"BufferDict update sequence element #" + str(j) + " should be Iterable; is" + type(p).__name__
)
if not len(p) == 2:
raise ValueError(
"BufferDict update sequence element "
"#" + str(j) + " has length " + str(len(p)) + "; 2 is required"
)
self[p[0]] = p[1]
def extra_repr(self):
child_lines = []
for k, p in self._buffers.items():
size_str = "x".join(str(size) for size in p.size())
device_str = "" if not p.is_cuda else f" (GPU {p.get_device()})"
parastr = f"Buffer containing: [{torch.typename(p)} of size {size_str}{device_str}]"
child_lines.append(" (" + k + "): " + parastr)
tmpstr = "\n".join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError("BufferDict should not be called.")
| peft/src/peft/tuners/_buffer_dict.py/0 | {
"file_path": "peft/src/peft/tuners/_buffer_dict.py",
"repo_id": "peft",
"token_count": 2442
} |
// Author: Yao Feng
// Date: 2023/08
// Description: cuda kernel for fast block diag
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
namespace{
template <typename scalar_t>
__global__ void forward_fast_block_diag_cuda_kernel(
const scalar_t* __restrict__ input, //[z, N, b, b]
scalar_t* output, //[z, Nxb, Nxb]
int z, int N, int b
) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= z*N*b*b) {
return;
}
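    // Each thread copies one element of one b x b block: the flat index i decomposes into
    // (zi, Ni, x, y) = (batch, block, row in block, column in block), and the element is written to
    // its position on the block diagonal of the [N*b, N*b] output matrix.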
const int zi = i/(N*b*b);
const int Ni = (i%(N*b*b))/(b*b);
const int x = ((i%(N*b*b))%(b*b))/b;
const int y = ((i%(N*b*b))%(b*b))%b;
output[zi*N*b*N*b + (Ni*b+x)*N*b + Ni*b + y] = input[zi*N*b*b + Ni*b*b + x*b + y];
}
template <typename scalar_t>
__global__ void backward_fast_block_diag_cuda_kernel(
const scalar_t* __restrict__ grad_output,
scalar_t* grad_input,
int z, int N, int b
) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= z*N*b*b) {
return;
}
const int zi = i/(N*b*b);
const int Ni = (i%(N*b*b))/(b*b);
const int x = ((i%(N*b*b))%(b*b))/b;
const int y = ((i%(N*b*b))%(b*b))%b;
grad_input[zi*N*b*b + Ni*b*b + x*b + y] = grad_output[zi*N*b*N*b + (Ni*b+x)*N*b + Ni*b + y];
}
} // namespace
std::vector<at::Tensor> forward_fast_block_diag_cuda(
at::Tensor input
){
const auto z = input.size(0);
const auto N = input.size(1);
const auto b = input.size(2);
// print(channel_size)
const int threads = 512;
const dim3 blocks_1 ((z*N*b*b - 1) / threads +1);
    // initialize output
auto output = at::zeros({z, N*b, N*b}, input.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "forward_fast_block_diag1", ([&] {
forward_fast_block_diag_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
z, N, b);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_fast_block_diag_cuda_kernel: %s\n", cudaGetErrorString(err));
return {output};
}
std::vector<at::Tensor> backward_fast_block_diag_cuda(
at::Tensor grad_output,
at::Tensor input
){
const auto z = input.size(0);
const auto N = input.size(1);
const auto b = input.size(2);
// print(channel_size)
const int threads = 512;
const dim3 blocks_1 ((z*N*b*b - 1) / threads +1);
// initialize grad input
auto grad_input = at::zeros_like(input);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_output.type(), "backward_fast_block_diag", ([&] {
backward_fast_block_diag_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
grad_output.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>(),
z, N, b);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in backward_fast_block_diag_cuda_kernel: %s\n", cudaGetErrorString(err));
return {grad_input};
}
| peft/src/peft/tuners/boft/fbd/fbd_cuda_kernel.cu/0 | {
"file_path": "peft/src/peft/tuners/boft/fbd/fbd_cuda_kernel.cu",
"repo_id": "peft",
"token_count": 1511
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class HRALayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("hra_u",)
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("hra_r", "hra_apply_GS")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.hra_r = {}
self.hra_apply_GS = {}
self.hra_u = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, nn.Conv2d):
self.in_features, self.out_features = base_layer.in_channels, base_layer.out_channels
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
def update_layer(
self,
adapter_name: str,
r: int,
apply_GS: bool,
init_weights: bool,
**kwargs,
) -> None:
"""Internal function to create hra adapter
Args:
adapter_name (`str`): Name for the adapter to add.
r (`int`): Rank for the added adapter.
init_weights (`bool`): Whether to initialize weights.
apply_GS (`bool`): Whether to apply Gram-Schmidt orthogonalization or not.
"""
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.hra_r[adapter_name] = r
self.hra_apply_GS[adapter_name] = apply_GS
# Determine shape of HRA weights
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.hra_u[adapter_name] = nn.Parameter(torch.empty(self.in_features, r), requires_grad=True)
elif isinstance(base_layer, nn.Conv2d):
self.hra_u[adapter_name] = nn.Parameter(
torch.empty(self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0], r),
requires_grad=True,
)
else:
raise TypeError(f"HRA is not implemented for base layers of type {type(base_layer).__name__}")
# Initialize weights
if init_weights:
self.reset_hra_parameters(adapter_name)
else:
self.reset_hra_parameters_random(adapter_name)
# Move new weights to device
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_hra_parameters(self, adapter_name: str):
if self.hra_r[adapter_name] % 2 != 0:
warnings.warn("The symmetric initialization can NOT be performed when r is odd!")
nn.init.kaiming_uniform_(self.hra_u[adapter_name], a=math.sqrt(5))
else:
shape = self.hra_u[adapter_name].shape
half_u = torch.zeros(shape[0], shape[1] // 2)
nn.init.kaiming_uniform_(half_u, a=math.sqrt(5))
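            # Repeating each column twice makes consecutive (identical) Householder reflections cancel,
            # so the initial delta weight is the identity and training starts from the unmodified base weights.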
self.hra_u[adapter_name] = nn.Parameter(torch.repeat_interleave(half_u, 2, dim=1))
def reset_hra_parameters_random(self, adapter_name: str):
nn.init.kaiming_uniform_(self.hra_u[adapter_name], a=math.sqrt(5))
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
warnings.warn("Scaling operation for HRA not supported! Automatically set scale to 1.")
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
warnings.warn("Unscaling operation for HRA not supported! Keeping scale at 1.")
class HRALinear(nn.Module, HRALayer):
"""
HRA implemented in a dense layer.
"""
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
apply_GS: bool = False,
init_weights: Union[bool, str] = True,
**kwargs,
) -> None:
super().__init__()
HRALayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, apply_GS, init_weights, **kwargs)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If `None`, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.hra_u.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
orig_weight = torch.mm(orig_weight, delta_weight)
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.base_layer.weight.data = orig_weight
else:
delta_weight = self.get_delta_weight(active_adapter)
self.base_layer.weight.data = torch.mm(self.base_layer.weight.data, delta_weight)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.hra_u.keys():
orig_weight = self.get_base_layer().weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter, reverse=True)
self.get_base_layer().weight.data = torch.mm(orig_weight, delta_weight)
def get_delta_weight(self, adapter_name: str, reverse: bool = False) -> torch.Tensor:
rank = self.hra_r[adapter_name]
apply_GS = self.hra_apply_GS[adapter_name]
opt_u = self.hra_u[adapter_name]
shape = opt_u.shape
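        # The delta weight is a product of Householder reflections H_i = I - 2 * u_i @ u_i.T built from the
        # (normalized) columns of hra_u, so it is orthogonal by construction. With apply_GS the columns are
        # first orthogonalized via Gram-Schmidt and the product collapses to I - 2 * U @ U.T; reverse=True
        # applies the reflections in the opposite order so that unmerge exactly inverts merge.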
if apply_GS:
weight = [(opt_u[:, 0] / opt_u[:, 0].norm()).view(-1, 1)]
for i in range(1, rank):
ui = opt_u[:, i].view(-1, 1)
for j in range(i):
ui = ui - (weight[j].t() @ ui) * weight[j]
weight.append((ui / ui.norm()).view(-1, 1))
weight = torch.cat(weight, dim=1)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype) - 2 * weight @ weight.t()
else:
opt_u = opt_u / opt_u.norm(dim=0)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype)
if reverse:
indices = range(rank - 1, -1, -1)
else:
indices = range(rank)
for i in indices:
ui = opt_u[:, i].view(-1, 1)
weight = weight - 2 * weight @ ui @ ui.t()
return weight
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
new_weight = torch.eye(self.in_features, device=x.device)
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
delta_weight = self.get_delta_weight(active_adapter)
new_weight = torch.mm(new_weight, delta_weight)
x = x.to(self.get_base_layer().weight.data.dtype)
orig_weight = self.get_base_layer().weight.data
new_weight = torch.mm(orig_weight, new_weight)
result = F.linear(input=x, weight=new_weight, bias=self.base_layer.bias)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "hra." + rep
class HRAConv2d(nn.Module, HRALayer):
"""HRA implemented in Conv2d layer"""
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
apply_GS: bool = False,
init_weights: Union[bool, str] = True,
**kwargs,
):
super().__init__()
HRALayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, apply_GS, init_weights, **kwargs)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If `None`, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.hra_u.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
orig_weight = orig_weight.view(
self.out_features,
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
)
delta_weight = self.get_delta_weight(active_adapter)
orig_weight = torch.mm(orig_weight, delta_weight)
orig_weight = orig_weight.view(
self.out_features,
self.in_features,
self.base_layer.kernel_size[0],
self.base_layer.kernel_size[0],
)
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.base_layer.weight.data = orig_weight
else:
orig_weight = base_layer.weight.data
orig_weight = orig_weight.view(
self.out_features,
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
)
delta_weight = self.get_delta_weight(active_adapter)
orig_weight = torch.mm(orig_weight, delta_weight)
orig_weight = orig_weight.view(
self.out_features,
self.in_features,
self.base_layer.kernel_size[0],
self.base_layer.kernel_size[0],
)
self.base_layer.weight.data = orig_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.hra_u.keys():
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = orig_weight.view(
self.out_features,
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
)
delta_weight = self.get_delta_weight(active_adapter, reverse=True)
orig_weight = torch.mm(orig_weight, delta_weight)
orig_weight = orig_weight.view(
self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0]
)
self.get_base_layer().weight.data = orig_weight
def get_delta_weight(self, adapter_name: str, reverse: bool = False) -> torch.Tensor:
rank = self.hra_r[adapter_name]
apply_GS = self.hra_apply_GS[adapter_name]
opt_u = self.hra_u[adapter_name]
shape = opt_u.shape
if apply_GS:
weight = [(opt_u[:, 0] / opt_u[:, 0].norm()).view(-1, 1)]
for i in range(1, rank):
ui = opt_u[:, i].view(-1, 1)
for j in range(i):
ui = ui - (weight[j].t() @ ui) * weight[j]
weight.append((ui / ui.norm()).view(-1, 1))
weight = torch.cat(weight, dim=1)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype) - 2 * weight @ weight.t()
else:
opt_u = opt_u / opt_u.norm(dim=0)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype)
if reverse:
indices = range(rank - 1, -1, -1)
else:
indices = range(rank)
for i in indices:
ui = opt_u[:, i].view(-1, 1)
weight = weight - 2 * weight @ ui @ ui.t()
return weight
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
new_weight = torch.eye(
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
device=x.device,
dtype=previous_dtype,
)
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
delta_weight = self.get_delta_weight(active_adapter)
new_weight = torch.mm(new_weight, delta_weight)
x = x.to(self.base_layer.weight.data.dtype)
orig_weight = self.base_layer.weight.data
orig_weight = orig_weight.view(
self.out_features,
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
)
new_weight = torch.mm(orig_weight, new_weight)
new_weight = new_weight.view(
self.out_features,
self.in_features,
self.base_layer.kernel_size[0],
self.base_layer.kernel_size[0],
)
result = F.conv2d(
input=x,
weight=new_weight,
bias=self.base_layer.bias,
padding=self.base_layer.padding[0],
stride=self.base_layer.stride[0],
)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "hra." + rep
| peft/src/peft/tuners/hra/layer.py/0 | {
"file_path": "peft/src/peft/tuners/hra/layer.py",
"repo_id": "peft",
"token_count": 8755
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Any, Optional
import torch
# from torch import nn
from peft.import_utils import is_torchao_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from .config import LoraConfig
from .layer import Linear
class TorchaoLoraLinear(Linear):
"""LoRA layer implementation for Linear layers using torchao data"""
def __init__(self, *args, get_apply_tensor_subclass, **kwargs):
# this is not strictly necessary, as kwargs are stored either way, but we want to error early if
# get_apply_tensor_subclass is missing.
if kwargs.get("lora_bias", False):
raise ValueError(f"{self.__class__.__name__} does not support lora_bias yet, set it to False")
super().__init__(*args, **kwargs)
self.get_apply_tensor_subclass = get_apply_tensor_subclass
self._check_dtype_supported()
def _check_dtype_supported(self):
# TODO: Not required once int4_weight_only is properly supported by torchao
base_layer = self.get_base_layer()
weight = base_layer.weight
# pytest tests/test_gpu_examples.py::PeftTorchaoGPUTests::test_causal_lm_training_single_gpu_torchao_0_int8_weight_only
if (
# torchao 0.7.0+
(hasattr(weight, "tensor_impl") and (weight.tensor_impl.data.dtype != torch.int8))
or
# torchao < 0.7.0
(hasattr(weight, "layout_tensor") and (weight.layout_tensor.data.dtype != torch.int8))
):
raise ValueError(f"{type(self).__name__} only supports int8 weights for now.")
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
from torchao import quantize_
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
self._check_dtype_supported()
base_layer = self.get_base_layer()
weight = base_layer.weight
for active_adapter in adapter_names:
try:
weight = weight.dequantize()
except NotImplementedError as exc:
msg = (
f"Weights of type {type(weight).__name__} do not support dequantization (yet), which is needed to "
"support merging."
)
raise NotImplementedError(msg) from exc
if safe_merge and not torch.isfinite(weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
weight += self.get_delta_weight(active_adapter)
# TODO: once (if) torchao supports directly mutating the data, use that instead.
del base_layer.weight
base_layer.weight = weight
quantize_(base_layer, self.get_apply_tensor_subclass())
del weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
from torchao import quantize_
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.lora_A.keys():
continue
base_layer = self.get_base_layer()
weight = base_layer.weight
try:
weight = weight.dequantize()
except NotImplementedError as exc:
msg = (
f"Weights of type {type(weight).__name__} do not support dequantization (yet), which is needed to "
"support unmerging."
)
raise NotImplementedError(msg) from exc
weight -= self.get_delta_weight(active_adapter)
# We go through a dummy module because overriding the weight.data does not work, the tensor retains the old
# data. Therefore, we need to go through quantize_, which takes a module as input, and we need to delete and
# re-assign the weight.
# TODO: once (if) torchao supports directly mutating the data, use that instead.
del base_layer.weight
base_layer.weight = weight
quantize_(base_layer, self.get_apply_tensor_subclass())
del weight
def __repr__(self) -> str:
rep = super().__repr__()
return rep.replace("lora.Linear", f"lora.{self.__class__.__name__}")
def dispatch_torchao(
target: torch.nn.Module,
adapter_name: str,
lora_config: LoraConfig,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if not hasattr(target_base_layer, "weight"):
return new_module
if not is_torchao_available():
return new_module
from torchao.dtypes import AffineQuantizedTensor
from torchao.quantization import LinearActivationQuantizedTensor
if isinstance(target_base_layer.weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor)):
new_module = TorchaoLoraLinear(target, adapter_name, **kwargs)
return new_module
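# --- Illustrative sketch (not part of the original module) ---
# `dispatch_torchao` is one of the backend dispatchers LoRA consults when replacing a target
# module: it only returns a wrapped layer when the target's weight is a torchao-quantized
# tensor, otherwise it returns None so another dispatcher can handle the module. The helper
# below shows the "declined" path with a plain float Linear; the adapter name and rank are
# arbitrary illustrative values.
def _demo_dispatch_torchao_declines_plain_linear():  # pragma: no cover - illustrative only
    plain = torch.nn.Linear(4, 4)
    config = LoraConfig(target_modules=["linear"])
    # A regular float nn.Linear is not torchao-quantized, so no module is created here.
    assert dispatch_torchao(plain, adapter_name="default", lora_config=config, r=8) is None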
| peft/src/peft/tuners/lora/torchao.py/0 | {
"file_path": "peft/src/peft/tuners/lora/torchao.py",
"repo_id": "peft",
"token_count": 2496
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class PolyConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`PolyModel`].
- [Polytropon (Poly)](https://arxiv.org/abs/2202.13914)
- [Multi-Head Routing (MHR)](https://arxiv.org/abs/2211.03831)
Args:
        r (`int`): Attention dimension of each LoRA in Poly.
target_modules (`Union[List[str],str]`): The names of the modules to apply Poly to.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
modules_to_save (`List[str]`): List of modules apart from Poly layers to be set as trainable
and saved in the final checkpoint.
init_weights (bool): Whether to perform initialization of Poly weights.
poly_type (`Literal["poly"]`): The variant of the Poly module to use. Currently, only "poly"
is supported.
n_tasks (`int`): The number of tasks in a multitasking scenario.
n_skills (`int`): The number of skills (LoRA) in each Poly layer.
n_splits (`int`): The number of splits within each LoRA of a Poly layer. A value greater
than 1 indicates the use of Multi-Head Routing (MHR).
"""
r: int = field(default=8, metadata={"help": "Lora attention dimension"})
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of module names or regex expression of the module names to replace with Poly."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from Poly."},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": "List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the Poly layers with their default initialization. Don't change "
"this setting, except if you know exactly what you're doing."
),
},
)
poly_type: Literal["poly"] = field(
default="poly",
metadata={"help": 'Type of Poly modules to be used. Currently only "poly" is supported.'},
)
n_tasks: int = field(
default=1,
metadata={"help": "Number of tasks in multitasking scenario."},
)
n_skills: int = field(
default=4,
metadata={"help": "Number of skills (LoRA) in each Poly layer."},
)
n_splits: int = field(
default=1,
metadata={"help": "Number of splits within each LoRA of a Poly layer."},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.POLY
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
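# --- Illustrative sketch (not part of the original module) ---
# Minimal example of constructing a PolyConfig for a T5-style seq2seq model. The task type and
# the target module names ("q", "v") are illustrative; adjust them for the model being tuned.
def _demo_poly_config():  # pragma: no cover - illustrative only
    config = PolyConfig(
        task_type="SEQ_2_SEQ_LM",
        r=8,
        target_modules=["q", "v"],
        n_tasks=2,  # number of tasks in the multitask mixture
        n_skills=4,  # number of LoRA "skills" shared across tasks
        n_splits=1,  # values > 1 enable Multi-Head Routing (MHR)
    )
    assert config.peft_type == PeftType.POLY
    assert config.target_modules == {"q", "v"}  # lists are normalized to sets in __post_init__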
| peft/src/peft/tuners/poly/config.py/0 | {
"file_path": "peft/src/peft/tuners/poly/config.py",
"repo_id": "peft",
"token_count": 1681
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Optional
import bitsandbytes as bnb
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import check_adapters_to_merge
from peft.utils.integrations import dequantize_bnb_weight
from peft.utils.other import transpose
from .layer import VeraLayer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, VeraLayer):
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
vera_A,
vera_B,
r: int = 0,
vera_dropout: float = 0.0,
fan_in_fan_out: bool = False,
init_weights: bool = True,
d_initial: float = 0.1,
**kwargs,
) -> None:
super().__init__()
VeraLayer.__init__(self, base_layer)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
vera_A,
vera_B,
r,
vera_dropout=vera_dropout,
init_weights=init_weights,
d_initial=d_initial,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
if self.merged:
warnings.warn(
f"Already following adapters were merged {','.join(self.merged_adapters)}. "
f"You are now additionally merging {','.join(self.active_adapters)}."
)
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter not in self.vera_lambda_d.keys():
continue
warnings.warn(
"Merge vera module to 8-bit linear may get different generations due to rounding errors."
)
vera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state)
w_data = output.to(vera_data.dtype).to(vera_data.device) + vera_data
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
state.reset_grads()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn("Already unmerged. Nothing to do")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.vera_lambda_d.keys():
continue
warnings.warn(
"Unmerge vera module to 8-bit linear may get different generations due to rounding errors."
)
vera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state=state)
w_data = output.to(vera_data.dtype).to(vera_data.device) - vera_data
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
state.reset_grads()
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str): The name of the adapter for which the delta weight should be computed.
Returns:
torch.Tensor: The computed delta weight for the VeRA adapter.
Note:
This method implements the VeRA-specific weight update. Unlike LoRA, VeRA uses shared projection
matrices (vera_A and vera_B) across all layers, along with per-layer trainable parameters (lambda_d and
lambda_b).
"""
# Retrieve shared projection matrices
vera_A = self.vera_A[adapter]
vera_B = self.vera_B[adapter]
# Retrieve per-layer trainable parameters
device = vera_B.device
dtype = vera_B.dtype
            # In case a user wants to merge adapter weights that are in (b)float16 while being on CPU, we need to
            # cast the weights to float32, perform the merge, and then cast back to (b)float16 because some CPUs
            # have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
lambda_d = self.vera_lambda_d[adapter]
lambda_b = self.vera_lambda_b[adapter]
if cast_to_fp32:
vera_A = vera_A.float()
vera_B = vera_B.float()
lambda_d = lambda_d.float()
lambda_b = lambda_b.float()
sliced_A = vera_A[:, : self.in_features].to(lambda_d.device)
sliced_B = vera_B[: self.out_features, :].to(lambda_d.device)
lambda_b = lambda_b.unsqueeze(-1)
lambda_d = lambda_d.unsqueeze(-1)
# VeRA-specific computation:
# 1. Apply lambda_d to the input projection (vera_A)
# 2. Apply lambda_b to the output projection (vera_B)
# 3. Compute the outer product of the scaled projections
output_tensor = transpose((lambda_b * sliced_B) @ (lambda_d * sliced_A), self.fan_in_fan_out)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
"""
Perform the forward pass using the VeRA adapter.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after applying the VeRA adaptation.
Note:
This method implements the VeRA-specific forward pass. It applies the shared projections (vera_A and
vera_B) along with the per-layer trainable parameters (lambda_d and lambda_b) to compute the adapter
output.
"""
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.vera_lambda_d.keys():
continue
lambda_d = self.vera_lambda_d[active_adapter]
lambda_b = self.vera_lambda_b[active_adapter]
vera_A = self.vera_A[active_adapter]
vera_B = self.vera_B[active_adapter]
dropout = self.vera_dropout[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lambda_d.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
sliced_A = vera_A[:, : self.in_features].to(x.device)
sliced_B = vera_B[: self.out_features, :].to(x.device)
x_temp = dropout(x.to(lambda_d.dtype))
adapter_output = lambda_b * torch.nn.functional.linear(
lambda_d * torch.nn.functional.linear(x_temp, sliced_A), sliced_B
)
if requires_conversion:
adapter_output = adapter_output.to(expected_dtype)
result = result + adapter_output
# Ensure the output tensor has the same dtype as the input tensor
return result.to(x.dtype)
def __repr__(self) -> str:
rep = super().__repr__()
return "vera." + rep
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, VeraLayer):
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
vera_A,
vera_B,
r: int = 0,
vera_dropout: float = 0.0,
fan_in_fan_out: bool = False,
init_weights: bool = True,
d_initial: float = 0.1,
**kwargs,
) -> None:
super().__init__()
VeraLayer.__init__(self, base_layer)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
vera_A,
vera_B,
r,
vera_dropout=vera_dropout,
init_weights=init_weights,
d_initial=d_initial,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
if self.merged:
warnings.warn(
f"Already following adapters were merged {','.join(self.merged_adapters)}. "
f"You are now additionally merging {','.join(self.active_adapters)}."
)
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter not in self.vera_lambda_d.keys():
continue
warnings.warn(
"Merge vera module to 4-bit linear may get different generations due to rounding errors."
)
vera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
# torch.compile can introduce attributes preceded by '_', remove them
kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) + vera_data
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
weight.device
)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn("Already unmerged. Nothing to do")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.vera_lambda_d.keys():
continue
warnings.warn(
"Unmerge vera module to 4-bit linear may get different generations due to rounding errors."
)
vera_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) - vera_data
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to(
weight.device
)
def get_delta_weight(self, adapter) -> torch.Tensor:
vera_A = self.vera_A[adapter]
vera_B = self.vera_B[adapter]
device = vera_B.device
dtype = vera_B.dtype
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
lambda_d = self.vera_lambda_d[adapter]
lambda_b = self.vera_lambda_b[adapter]
if cast_to_fp32:
vera_A = vera_A.float()
vera_B = vera_B.float()
lambda_d = lambda_d.float()
lambda_b = lambda_b.float()
sliced_A = vera_A[:, : self.in_features].to(lambda_d.device)
sliced_B = vera_B[: self.out_features, :].to(lambda_d.device)
lambda_b = lambda_b.unsqueeze(-1)
lambda_d = lambda_d.unsqueeze(-1)
output_tensor = transpose((lambda_b * sliced_B) @ (lambda_d * sliced_A), self.fan_in_fan_out)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
result = result.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.vera_lambda_d.keys():
continue
lambda_d = self.vera_lambda_d[active_adapter]
lambda_b = self.vera_lambda_b[active_adapter]
vera_A = self.vera_A[active_adapter]
vera_B = self.vera_B[active_adapter]
dropout = self.vera_dropout[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lambda_d.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
sliced_A = vera_A[:, : self.in_features].to(x.device)
sliced_B = vera_B[: self.out_features, :].to(x.device)
x_temp = dropout(x.to(lambda_d.dtype))
adapter_output = lambda_b * torch.nn.functional.linear(
lambda_d * torch.nn.functional.linear(x_temp, sliced_A), sliced_B
)
if requires_conversion:
adapter_output = adapter_output.to(expected_dtype)
result = result + adapter_output
# Ensure the output tensor has the same dtype as the input tensor
return result.to(x.dtype)
def __repr__(self) -> str:
rep = super().__repr__()
return "vera." + rep
| peft/src/peft/tuners/vera/bnb.py/0 | {
"file_path": "peft/src/peft/tuners/vera/bnb.py",
"repo_id": "peft",
"token_count": 8510
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import inspect
import os
import re
import warnings
from contextlib import nullcontext
from typing import Any, Optional
import accelerate
import torch
from accelerate.hooks import add_hook_to_module, remove_hook_from_module
from accelerate.utils import is_npu_available, is_xpu_available
from huggingface_hub import file_exists
from huggingface_hub.errors import EntryNotFoundError, HFValidationError
from packaging import version
from safetensors.torch import storage_ptr, storage_size
from ..import_utils import is_auto_gptq_available, is_gptqmodel_available, is_torch_tpu_available
from .constants import (
CONFIG_NAME,
EMBEDDING_LAYER_NAMES,
INCLUDE_LINEAR_LAYERS_SHORTHAND,
SAFETENSORS_WEIGHTS_NAME,
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING,
WEIGHTS_NAME,
bloom_model_postprocess_past_key_value,
starcoder_model_postprocess_past_key_value,
)
mlu_available = False
if version.parse(accelerate.__version__) >= version.parse("0.29.0"):
from accelerate.utils import is_mlu_available
mlu_available = is_mlu_available()
__all__ = [
"CONFIG_NAME",
"EMBEDDING_LAYER_NAMES",
"INCLUDE_LINEAR_LAYERS_SHORTHAND",
"SAFETENSORS_WEIGHTS_NAME",
"TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING",
"TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING",
"TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING",
"TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING",
"TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING",
"TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING",
"TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING",
"TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING",
"TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING",
"WEIGHTS_NAME",
"bloom_model_postprocess_past_key_value",
"starcoder_model_postprocess_past_key_value",
]
# Get current device name based on available devices
def infer_device() -> str:
if torch.cuda.is_available():
return "cuda"
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
return "mps"
elif mlu_available:
return "mlu"
elif is_xpu_available():
return "xpu"
elif is_npu_available():
return "npu"
return "cpu"
def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True, gradient_checkpointing_kwargs=None):
r"""
Note this method only works for `transformers` models.
    This method wraps the entire protocol for preparing a model before running training. This includes:
        1- casting the layernorm in fp32
        2- making the output embedding layer require grads
        3- upcasting the lm head to fp32
        4- freezing the base model layers so they are not updated during training
Args:
model (`transformers.PreTrainedModel`):
The loaded model from `transformers`
use_gradient_checkpointing (`bool`, *optional*, defaults to `True`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`):
Keyword arguments to pass to the gradient checkpointing function, please refer to the documentation of
`torch.utils.checkpoint.checkpoint` for more details about the arguments that you can pass to that method.
Note this is only available in the latest transformers versions (> 4.34.1).
"""
loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)
is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq"
is_aqlm_quantized = getattr(model, "quantization_method", None) == "aqlm"
is_eetq_quantized = getattr(model, "quantization_method", None) == "eetq"
is_torchao_quantized = getattr(model, "quantization_method", None) == "torchao"
is_hqq_quantized = getattr(model, "quantization_method", None) == "hqq" or getattr(model, "hqq_quantized", False)
if gradient_checkpointing_kwargs is None:
gradient_checkpointing_kwargs = {}
for name, param in model.named_parameters():
# freeze base model's layers
param.requires_grad = False
if (
not is_gptq_quantized
and not is_aqlm_quantized
and not is_eetq_quantized
and not is_hqq_quantized
and not is_torchao_quantized
):
# cast all non INT8 parameters to fp32
for param in model.parameters():
if (
(param.dtype == torch.float16) or (param.dtype == torch.bfloat16)
) and param.__class__.__name__ != "Params4bit":
param.data = param.data.to(torch.float32)
if (
loaded_in_kbit
or is_gptq_quantized
or is_aqlm_quantized
or is_eetq_quantized
or is_hqq_quantized
or is_torchao_quantized
) and use_gradient_checkpointing:
        # When using `use_reentrant=False` + gradient checkpointing, there is no need for this hack
if "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]:
# For backward compatibility
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
# To support older transformers versions, check if the model supports gradient_checkpointing_kwargs
_supports_gc_kwargs = "gradient_checkpointing_kwargs" in list(
inspect.signature(model.gradient_checkpointing_enable).parameters
)
if not _supports_gc_kwargs and len(gradient_checkpointing_kwargs) > 0:
warnings.warn(
"gradient_checkpointing_kwargs is not supported in this version of transformers. The passed kwargs will be ignored."
" if you want to use that feature, please upgrade to the latest version of transformers.",
FutureWarning,
)
gc_enable_kwargs = (
{} if not _supports_gc_kwargs else {"gradient_checkpointing_kwargs": gradient_checkpointing_kwargs}
)
# enable gradient checkpointing for memory efficiency
model.gradient_checkpointing_enable(**gc_enable_kwargs)
return model
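# --- Illustrative sketch (not part of the original module) ---
# Tiny demonstration of the freezing/upcasting behaviour of `prepare_model_for_kbit_training`.
# Real usage passes a quantized `transformers` model (as the docstring notes); the plain
# half-precision nn.Linear below only exercises the parameter handling, and gradient
# checkpointing is disabled because the toy module is not k-bit quantized.
def _demo_prepare_model_for_kbit_training():  # pragma: no cover - illustrative only
    toy = torch.nn.Linear(4, 4).to(torch.float16)
    toy = prepare_model_for_kbit_training(toy, use_gradient_checkpointing=False)
    assert all(not p.requires_grad for p in toy.parameters())  # base parameters are frozen
    assert all(p.dtype == torch.float32 for p in toy.parameters())  # fp16 params upcast to fp32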
# copied from transformers.models.bart.modeling_bart
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids
pad_token_id (`int`): The id of the `padding` token.
decoder_start_token_id (`int`): The id of the `start` token.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
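# --- Illustrative sketch (not part of the original module) ---
# Worked example of the shift above: the decoder start token is prepended, the last token is
# dropped, and any -100 label padding that remains is replaced by `pad_token_id`. The token ids
# below are arbitrary.
def _demo_shift_tokens_right():  # pragma: no cover - illustrative only
    labels = torch.tensor([[5, -100, 7, 8]])
    shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
    assert shifted.tolist() == [[2, 5, 0, 7]]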
class ModulesToSaveWrapper(torch.nn.Module):
def __init__(self, module_to_save, adapter_name):
super().__init__()
self.original_module = module_to_save
self.modules_to_save = torch.nn.ModuleDict({})
self._active_adapter = adapter_name
self._disable_adapters = False
self.update(adapter_name)
self.check_module()
def check_module(self):
"""Perform some sanity checks on the module to ensure that it works"""
# Try to anticipate some modules that users could try to target that would not work.
# Note: It's not possible to check hasattr(module, "forward"), since that returns True for ModuleDict and
# ModuleList, even though their forward methods cannot be called
forbidden_classes = (torch.nn.ModuleDict, torch.nn.ModuleList, torch.nn.ParameterDict, torch.nn.ParameterList)
if isinstance(self.original_module, forbidden_classes):
cls_name = self.original_module.__class__
raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}")
# local import to avoid circular import
from peft.tuners.tuners_utils import BaseTunerLayer
if isinstance(self.original_module, BaseTunerLayer):
# e.g. applying modules_to_save to a lora layer makes no sense
cls_name = self.original_module.__class__
raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}")
@property
def disable_adapters(self) -> bool:
# use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method
return self._disable_adapters
@property
def active_adapter(self) -> str:
# use a property to ensure that active_adapter is not set directly, instead use the set_adapter method
return self._active_adapter
def __getattr__(self, name: str):
# Note: This whole method may seem overly complex at first but PyTorch messes with __getattr__ in a way that
# requires very careful handling to avoid infinite recursion.
try:
return super().__getattr__(name)
except AttributeError:
pass
if "_modules" not in self.__dict__:
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
# Could not find the attribute the PyTorch way. So let's check if it's an attribute on the
# original_module/modules_to_save.
modules = self.__dict__["_modules"]
if self.disable_adapters:
module = modules["original_module"]
elif self.active_adapter in modules["modules_to_save"]:
module = modules["modules_to_save"][self.active_adapter]
else:
# For some reason, there is no module corresponding to the active adapter; this should normally not be
# reached and exists as a failsafe (otherwise, a KeyError would be raised)
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
return getattr(module, name)
def update(self, adapter_name):
context_manager = nullcontext()
for _, param in self.original_module.named_parameters():
num_params = param.numel()
# if using DS Zero 3 and the weights are initialized empty
if num_params == 0 and hasattr(param, "ds_numel"):
import deepspeed
context_manager = deepspeed.zero.GatheredParameters(self.original_module.parameters(), modifier_rank=0)
break
if adapter_name not in self.modules_to_save:
with context_manager:
self.modules_to_save[adapter_name] = copy.deepcopy(self.original_module)
if hasattr(self.modules_to_save[adapter_name], "_hf_hook"):
old_hook = self.modules_to_save[adapter_name]._hf_hook
new_hook = self._create_new_hook(old_hook)
remove_hook_from_module(self.modules_to_save[adapter_name])
add_hook_to_module(self.modules_to_save[adapter_name], new_hook)
self.original_module.requires_grad_(False)
if adapter_name == self.active_adapter:
self.modules_to_save[adapter_name].requires_grad_(True)
def _create_new_hook(self, old_hook):
r"""
        Creates a new hook based on the old hook. Use it only if you know what you are doing!
"""
old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__)
old_hook_attr = old_hook.__dict__
filtered_old_hook_attr = {}
old_hook_init_signature = inspect.signature(old_hook_cls.__init__)
for k in old_hook_attr.keys():
if k in old_hook_init_signature.parameters:
filtered_old_hook_attr[k] = old_hook_attr[k]
new_hook = old_hook_cls(**filtered_old_hook_attr)
return new_hook
def _check_forward_args(self, x, *args, **kwargs):
"""Check if the arguments are compatible with the configs and state of the model"""
adapter_names = kwargs.get("adapter_names", None)
if adapter_names is None:
return
if len(x) != len(adapter_names):
msg = (
"Length of `adapter_names` should be the same as the number of inputs, but got "
f"{len(adapter_names)} and {len(x)} respectively."
)
raise ValueError(msg)
def _mixed_batch_forward(
self, input: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
) -> torch.Tensor:
# This is a special method that handles the case when users pass the argument `adapter_names`. This is an
# extra argument that allows mixing different adapters in the same batch at inference time.
SUPPORTED_MODULES = (torch.nn.Linear, torch.nn.Embedding, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d)
module_names = ", ".join([module.__name__ for module in SUPPORTED_MODULES])
if not isinstance(self.original_module, SUPPORTED_MODULES):
raise TypeError(f"Mixed batching is only supported for the following modules: {module_names}.")
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
results = [0 for _ in range(len(input))]
for i, active_adapter in enumerate(unique_adapters):
sub_batch = input[sub_batch_indices_list[i]]
if active_adapter == "__base__":
output = self.original_module(sub_batch, *args, **kwargs)
else:
output = self.modules_to_save[active_adapter](sub_batch, *args, **kwargs)
for index, j in enumerate(sub_batch_indices_list[i]):
results[j] = output[index]
return torch.stack(results)
def forward(self, x: torch.Tensor, *args, **kwargs):
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters or (self.active_adapter not in self.modules_to_save):
return self.original_module(x, *args, **kwargs)
if adapter_names is None:
return self.modules_to_save[self.active_adapter](x, *args, **kwargs)
return self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
def enable_adapters(self, enabled: bool):
"""Toggle the enabling and disabling of adapters
Takes care of setting the requires_grad flag for the adapter weights.
Args:
enabled (bool): True to enable adapters, False to disable adapters
"""
if self._disable_adapters is not enabled:
# already in the desired state, do nothing
return
if enabled:
self.original_module.requires_grad_(False)
self.modules_to_save[self.active_adapter].requires_grad_(True)
self._disable_adapters = False
else:
self.original_module.requires_grad_(True)
self.modules_to_save.requires_grad_(False)
self._disable_adapters = True
def set_adapter(self, adapter_name: str):
"""Set the active adapter
Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is
not desired, use the following code.
```py
>>> for name, param in model_peft.named_parameters():
... if ...: # some check on name (ex. if 'lora' in name)
... param.requires_grad = False
```
Args:
adapter_name (str): The name of the adapter to set as active
"""
if adapter_name not in self.modules_to_save:
raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}")
self.modules_to_save[self.active_adapter].requires_grad_(False)
self.modules_to_save[adapter_name].requires_grad_(True)
self._active_adapter = adapter_name
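# --- Illustrative sketch (not part of the original module) ---
# Minimal demonstration of `ModulesToSaveWrapper`: the original module is frozen while a
# per-adapter deep copy is kept trainable, and the forward pass is routed to the copy of the
# active adapter. The wrapped nn.Linear and the adapter name are illustrative.
def _demo_modules_to_save_wrapper():  # pragma: no cover - illustrative only
    base = torch.nn.Linear(4, 4)
    wrapped = ModulesToSaveWrapper(base, adapter_name="default")
    assert not any(p.requires_grad for p in wrapped.original_module.parameters())
    assert all(p.requires_grad for p in wrapped.modules_to_save["default"].parameters())
    out = wrapped(torch.randn(2, 4))
    assert out.shape == (2, 4)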
def _get_submodules(model, key):
parent = model.get_submodule(".".join(key.split(".")[:-1]))
target_name = key.split(".")[-1]
target = model.get_submodule(key)
return parent, target, target_name
def _freeze_adapter(model, adapter_name):
for n, p in model.named_parameters():
if adapter_name in n:
p.requires_grad = False
def _set_trainable(model, adapter_name, modules_to_save):
key_list = [key for key, _ in model.named_modules()]
for key in key_list:
target_module_found = any(key.endswith(target_key) for target_key in modules_to_save)
if target_module_found:
parent, target, target_name = _get_submodules(model, key)
if isinstance(target, ModulesToSaveWrapper):
target.update(adapter_name)
target.set_adapter(target.active_adapter)
else:
new_module = ModulesToSaveWrapper(target, adapter_name)
new_module.set_adapter(adapter_name)
setattr(parent, target_name, new_module)
def _set_adapter(model, adapter_name):
def check_adapter_name(adapter_name):
if isinstance(adapter_name, str):
return adapter_name
# adapter_name is a list of str
if len(adapter_name) > 1:
raise ValueError("Only one adapter can be set at a time for modules_to_save")
elif len(adapter_name) == 0:
raise ValueError("Please specify at least one adapter to set")
adapter_name = adapter_name[0]
return adapter_name
for module in model.modules():
if isinstance(module, ModulesToSaveWrapper):
# only check the adapter_name if we actually encounter a ModulesToSaveWrapper, otherwise we don't care
adapter_name = check_adapter_name(adapter_name)
# if the adapter is found in this module, set it as the active adapter, else disable the adapters of this
# module
if adapter_name in module.modules_to_save:
module.set_adapter(adapter_name)
else:
module.enable_adapters(False)
def _prepare_prompt_learning_config(peft_config, model_config):
if peft_config.num_layers is None:
if "num_hidden_layers" in model_config:
num_layers = model_config["num_hidden_layers"]
elif "num_layers" in model_config:
num_layers = model_config["num_layers"]
elif "n_layer" in model_config:
num_layers = model_config["n_layer"]
else:
raise ValueError("Please specify `num_layers` in `peft_config`")
peft_config.num_layers = num_layers
if peft_config.token_dim is None:
if "hidden_size" in model_config:
token_dim = model_config["hidden_size"]
elif "n_embd" in model_config:
token_dim = model_config["n_embd"]
elif "d_model" in model_config:
token_dim = model_config["d_model"]
else:
raise ValueError("Please specify `token_dim` in `peft_config`")
peft_config.token_dim = token_dim
if peft_config.num_attention_heads is None:
if "num_attention_heads" in model_config:
num_attention_heads = model_config["num_attention_heads"]
elif "n_head" in model_config:
num_attention_heads = model_config["n_head"]
elif "num_heads" in model_config:
num_attention_heads = model_config["num_heads"]
elif "encoder_attention_heads" in model_config:
num_attention_heads = model_config["encoder_attention_heads"]
else:
raise ValueError("Please specify `num_attention_heads` in `peft_config`")
peft_config.num_attention_heads = num_attention_heads
# For grouped-query attention, see #1901.
if peft_config.peft_type == "PREFIX_TUNING" and "num_key_value_heads" in model_config:
num_key_value_heads = model_config["num_key_value_heads"]
peft_config.token_dim = peft_config.token_dim // peft_config.num_attention_heads * num_key_value_heads
peft_config.num_attention_heads = num_key_value_heads
if getattr(peft_config, "encoder_hidden_size", None) is None:
setattr(peft_config, "encoder_hidden_size", peft_config.token_dim)
return peft_config
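# --- Illustrative sketch (not part of the original module) ---
# Worked example of the grouped-query attention adjustment above: with 32 query heads, a hidden
# size of 4096 and only 8 key/value heads, the per-token prefix dimension becomes
# 4096 // 32 * 8 = 1024. A SimpleNamespace stands in for a real prompt learning config here,
# purely to exercise the arithmetic; actual callers pass a PromptLearningConfig instance.
def _demo_prepare_prompt_learning_config():  # pragma: no cover - illustrative only
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        num_layers=None,
        token_dim=None,
        num_attention_heads=None,
        peft_type="PREFIX_TUNING",
        encoder_hidden_size=None,
    )
    model_config = {
        "num_hidden_layers": 2,
        "hidden_size": 4096,
        "num_attention_heads": 32,
        "num_key_value_heads": 8,
    }
    cfg = _prepare_prompt_learning_config(cfg, model_config)
    assert (cfg.token_dim, cfg.num_attention_heads) == (1024, 8)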
def fsdp_auto_wrap_policy(model):
import functools
import os
from accelerate import FullyShardedDataParallelPlugin
if hasattr(FullyShardedDataParallelPlugin, "get_module_class_from_name"):
get_module_class_from_name = FullyShardedDataParallelPlugin.get_module_class_from_name
else:
from accelerate.utils.dataclasses import get_module_class_from_name
from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy
from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder
default_transformer_cls_names_to_wrap = (
",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
)
transformer_cls_names_to_wrap = os.environ.get(
"FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap
).split(",")
transformer_cls_to_wrap = {PrefixEncoder, PromptEncoder, PromptEmbedding}
for layer_class in transformer_cls_names_to_wrap:
if len(layer_class) == 0:
continue
transformer_cls = get_module_class_from_name(model, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
def lambda_policy_fn(module):
if (
len(list(module.named_children())) == 0
and getattr(module, "weight", None) is not None
and module.weight.requires_grad
):
return True
return False
lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn)
transformer_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls=transformer_cls_to_wrap,
)
auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy])
return auto_wrap_policy
def transpose(weight, fan_in_fan_out):
if not fan_in_fan_out:
return weight
if isinstance(weight, torch.nn.Parameter):
return torch.nn.Parameter(weight.T)
return weight.T
def _is_valid_match(key: str, target_key: str):
"""
Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key
or the target_key is a submodule of key
"""
if key.endswith(target_key):
if len(key) > len(target_key):
return key.endswith("." + target_key) # must be a sub module
return True
return False
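# --- Illustrative sketch (not part of the original module) ---
# A few concrete cases for the matching rule above: a key matches the target if it is identical
# to it or if the target names one of its submodules, but not if the target is merely a suffix
# of a longer module name. The module names are illustrative.
def _demo_is_valid_match():  # pragma: no cover - illustrative only
    assert _is_valid_match("model.decoder.q_proj", "q_proj")  # submodule match
    assert _is_valid_match("q_proj", "q_proj")  # exact match
    assert not _is_valid_match("model.decoder.mlp_q_proj", "q_proj")  # suffix of another name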
def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int:
"""Get the batch size based on either input_ids or input_embeds
Raises an ValueError if both are None.
"""
if (input_ids is None) and (inputs_embeds is None):
raise ValueError("You have to provide either input_ids or inputs_embeds")
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
return batch_size
def get_quantization_config(model: torch.nn.Module, method: str):
"""
Get the quantization config of the related quantization method
"""
if (
hasattr(model, "config")
and hasattr(model.config, "quantization_config")
and (getattr(model, "quantization_method", None) == method)
):
return model.config.quantization_config
return None
def get_auto_gptq_quant_linear(gptq_quantization_config):
"""
Get the right AutoGPTQQuantLinear class based on the quantization config file
"""
if gptq_quantization_config is None:
return None
if is_auto_gptq_available():
from auto_gptq.utils.import_utils import dynamically_import_QuantLinear
else:
return None
desc_act = gptq_quantization_config.desc_act
group_size = gptq_quantization_config.group_size
bits = gptq_quantization_config.bits
if hasattr(gptq_quantization_config, "use_exllama"):
use_exllama = gptq_quantization_config.use_exllama
else:
use_exllama = not gptq_quantization_config.disable_exllama
if hasattr(gptq_quantization_config, "exllama_config"):
exllama_version = gptq_quantization_config.exllama_config["version"]
else:
exllama_version = 1
QuantLinear = dynamically_import_QuantLinear(
use_triton=False,
desc_act=desc_act,
group_size=group_size,
bits=bits,
disable_exllama=not (use_exllama and exllama_version == 1),
disable_exllamav2=not (use_exllama and exllama_version == 2),
)
return QuantLinear
def get_gptqmodel_quant_linear(gptq_quantization_config, device_map=None):
"""
Get the right GPTQQuantLinear class based on the quantization config file
"""
if gptq_quantization_config is None:
return None
if not is_gptqmodel_available():
return None
from gptqmodel.utils.importer import hf_select_quant_linear
desc_act = gptq_quantization_config.desc_act
group_size = gptq_quantization_config.group_size
bits = gptq_quantization_config.bits
checkpoint_format = (
gptq_quantization_config.checkpoint_format
if hasattr(gptq_quantization_config, "checkpoint_format")
else "gptq"
)
sym = gptq_quantization_config.sym
meta = gptq_quantization_config.meta if hasattr(gptq_quantization_config, "meta") else None
QuantLinear = hf_select_quant_linear(
bits=bits,
group_size=group_size,
desc_act=desc_act,
sym=sym,
device_map=device_map,
checkpoint_format=checkpoint_format,
meta=meta,
backend="auto_trainable",
)
return QuantLinear
def id_tensor_storage(tensor: torch.Tensor) -> tuple[torch.device, int, int]:
"""
Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
non-overlapping lifetimes may have the same id.
    This method is an exact copy of
    https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added
    it here manually to avoid import issues with old versions of transformers.
"""
if tensor.device.type == "xla" and is_torch_tpu_available():
# NOTE: xla tensors dont have storage
# use some other unique id to distinguish.
# this is a XLA tensor, it must be created using torch_xla's
# device. So the following import is safe:
import torch_xla
unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
else:
unique_id = storage_ptr(tensor)
return tensor.device, unique_id, storage_size(tensor)
def cast_mixed_precision_params(model, dtype):
"""
Cast all non-trainable parameters of the model to the given `dtype`. The `dtype` can be `torch.float16` or
`torch.bfloat16` as per the mixed-precision training you are performing. The trainable parameters are cast to full
precision. This is meant to reduce the GPU memory usage when using PEFT methods by using half-precision dtype for
non-trainable parameters. Having the trainable parameters in full-precision preserves training stability when using
automatic mixed-precision training.
Args:
model (`torch.nn.Module`):
The model to cast the non-trainable parameters of.
dtype (`torch.dtype`):
The dtype to cast the non-trainable parameters to. The `dtype` can be `torch.float16` or
`torch.bfloat16` as per the mixed-precision training you are performing.
"""
for p in model.parameters():
if not p.requires_grad:
p.data = p.to(dtype)
else:
p.data = p.to(torch.float32)
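# --- Illustrative sketch (not part of the original module) ---
# Small demonstration of `cast_mixed_precision_params`: non-trainable parameters are cast to the
# requested half-precision dtype while trainable ones are kept in float32. The toy nn.Linear and
# the choice of which parameter is frozen are illustrative.
def _demo_cast_mixed_precision_params():  # pragma: no cover - illustrative only
    toy = torch.nn.Linear(4, 4)
    toy.weight.requires_grad_(False)  # pretend the weight belongs to the frozen base model
    cast_mixed_precision_params(toy, dtype=torch.float16)
    assert toy.weight.dtype == torch.float16  # non-trainable -> half precision
    assert toy.bias.dtype == torch.float32  # trainable -> full precision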
def str_to_bool(value: str) -> int:
"""
Converts a string representation of truth to `True` (1) or `False` (0).
    True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
"""
    # same as the function in accelerate.utils, which replaces the deprecated distutils.util.strtobool
value = value.lower()
if value in ("y", "yes", "t", "true", "on", "1"):
return 1
elif value in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {value}")
def check_file_exists_on_hf_hub(repo_id: str, filename: str, **kwargs) -> Optional[bool]:
"""Check if a file exists on HF Hub, if check was not successful returns None instead of erroring.
Respect offline mode if set.
"""
exists: Optional[bool] = None
if str_to_bool(os.environ.get("HF_HUB_OFFLINE", "0")):
# user set offline mode, cannot check
return exists
try:
exists = file_exists(repo_id, filename, **kwargs)
except (HFValidationError, EntryNotFoundError):
# error, exists stays None
pass
except Exception as e:
warnings.warn(
f"Unable to fetch remote file due to the following error {e} - silently ignoring the lookup"
f" for the file {filename} in {repo_id}."
)
return exists
def get_pattern_key(pattern_keys, key_to_match):
"""Match a substring of key_to_match in pattern keys"""
return next(filter(lambda key: re.match(rf".*\.{key}$", key_to_match), pattern_keys), key_to_match)
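# --- Illustrative sketch (not part of the original module) ---
# Example of the pattern lookup above, as used for e.g. rank_pattern/alpha_pattern dictionaries:
# the first pattern that matches the end of the module name is returned, otherwise the module
# name itself. The pattern dictionary and module names are illustrative.
def _demo_get_pattern_key():  # pragma: no cover - illustrative only
    rank_pattern = {"q_proj": 16, "v_proj": 4}
    assert get_pattern_key(rank_pattern.keys(), "model.layers.0.self_attn.q_proj") == "q_proj"
    assert get_pattern_key(rank_pattern.keys(), "lm_head") == "lm_head"  # no pattern matched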
| peft/src/peft/utils/other.py/0 | {
"file_path": "peft/src/peft/utils/other.py",
"repo_id": "peft",
"token_count": 12804
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from transformers import AutoModel
from peft import LoraConfig, PrefixTuningConfig, PromptLearningConfig
from .testing_common import PeftCommonTester, PeftTestConfigManager
PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST = [
"hf-internal-testing/tiny-random-BertModel",
"hf-internal-testing/tiny-random-RobertaModel",
"hf-internal-testing/tiny-random-DebertaModel",
"hf-internal-testing/tiny-random-DebertaV2Model",
]
FULL_GRID = {
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"task_type": "FEATURE_EXTRACTION",
}
def skip_non_prompt_tuning(test_list):
"""Skip tests that are not prompt tuning"""
return [
test for test in test_list if issubclass(test[2], PromptLearningConfig) and (test[2] != PrefixTuningConfig)
]
def skip_deberta_lora_tests(test_list):
r"""
    Skip gradient checkpointing tests with lora/ia3/boft/vera/fourierft for Deberta models (couldn't find much info
    on the error)
"""
to_skip = ["lora", "ia3", "boft", "vera", "fourierft", "hra", "bone"]
return [test for test in test_list if not (any(k in test[0] for k in to_skip) and "Deberta" in test[0])]
def skip_deberta_pt_tests(test_list):
r"""
    Skip prompt learning tests that use prefix tuning for Deberta models (couldn't find much info on the error)
"""
return [test for test in test_list if not ("prefix_tuning" in test[0] and "Deberta" in test[0])]
class PeftFeatureExtractionModelTester(unittest.TestCase, PeftCommonTester):
r"""
Test if the PeftModel behaves as expected. This includes:
- test if the model has the expected methods
    We use parameterized.expand for debugging purposes to test each model individually.
"""
transformers_class = AutoModel
def prepare_inputs_for_testing(self):
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
input_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return input_dict
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_prepare_for_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs)
def test_load_model_low_cpu_mem_usage(self):
self._test_load_model_low_cpu_mem_usage(PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST[0], LoraConfig, {})
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "FEATURE_EXTRACTION",
},
)
)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training(self, test_name, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_pt_tests)
)
def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_layer_indexing(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_lora_tests)
)
def test_training_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "FEATURE_EXTRACTION",
},
)
)
def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_unload_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "FEATURE_EXTRACTION",
},
)
)
def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_prompt_tuning)
)
def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
| peft/tests/test_feature_extraction_models.py/0 | {
"file_path": "peft/tests/test_feature_extraction_models.py",
"repo_id": "peft",
"token_count": 3985
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict, replace
from unittest import TestCase
import numpy as np
from diffusers import StableDiffusionPipeline
from parameterized import parameterized
from peft import (
BOFTConfig,
HRAConfig,
    LoHaConfig,
    LoKrConfig,
LoraConfig,
OFTConfig,
get_peft_model,
get_peft_model_state_dict,
inject_adapter_in_model,
set_peft_model_state_dict,
)
from peft.tuners.tuners_utils import BaseTunerLayer
from .testing_common import ClassInstantier, PeftCommonTester
from .testing_utils import temp_seed
PEFT_DIFFUSERS_SD_MODELS_TO_TEST = ["hf-internal-testing/tiny-sd-pipe"]
CONFIG_TESTING_KWARGS = (
{
"text_encoder": {
"r": 8,
"lora_alpha": 32,
"target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
"lora_dropout": 0.0,
"bias": "none",
},
"unet": {
"r": 8,
"lora_alpha": 32,
"target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"],
"lora_dropout": 0.0,
"bias": "none",
},
},
{
"text_encoder": {
"r": 8,
"alpha": 32,
"target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
"rank_dropout": 0.0,
"module_dropout": 0.0,
},
"unet": {
"r": 8,
"alpha": 32,
"target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"],
"rank_dropout": 0.0,
"module_dropout": 0.0,
},
},
{
"text_encoder": {
"r": 1,
"target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
"module_dropout": 0.0,
},
"unet": {
"r": 1,
"target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"],
"module_dropout": 0.0,
},
},
{
"text_encoder": {
"boft_block_num": 1,
"boft_block_size": 0,
"target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
"boft_dropout": 0.0,
},
"unet": {
"boft_block_num": 1,
"boft_block_size": 0,
"target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"],
"boft_dropout": 0.0,
},
},
{
"text_encoder": {
"r": 8,
"target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
},
"unet": {
"r": 8,
"target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"],
},
},
)
CLASSES_MAPPING = {
"lora": (LoraConfig, CONFIG_TESTING_KWARGS[0]),
"loha": (LoHaConfig, CONFIG_TESTING_KWARGS[1]),
"lokr": (LoHaConfig, CONFIG_TESTING_KWARGS[1]),
"oft": (OFTConfig, CONFIG_TESTING_KWARGS[2]),
"boft": (BOFTConfig, CONFIG_TESTING_KWARGS[3]),
"hra": (HRAConfig, CONFIG_TESTING_KWARGS[4]),
}
PeftStableDiffusionTestConfigManager = ClassInstantier(CLASSES_MAPPING)
class StableDiffusionModelTester(TestCase, PeftCommonTester):
r"""
Tests that diffusers StableDiffusion model works with PEFT as expected.
"""
transformers_class = StableDiffusionPipeline
def instantiate_sd_peft(self, model_id, config_cls, config_kwargs):
# Instantiate StableDiffusionPipeline
model = self.transformers_class.from_pretrained(model_id)
config_kwargs = config_kwargs.copy()
text_encoder_kwargs = config_kwargs.pop("text_encoder")
unet_kwargs = config_kwargs.pop("unet")
# the remaining config kwargs should be applied to both configs
for key, val in config_kwargs.items():
text_encoder_kwargs[key] = val
unet_kwargs[key] = val
# Instantiate text_encoder adapter
config_text_encoder = config_cls(**text_encoder_kwargs)
model.text_encoder = get_peft_model(model.text_encoder, config_text_encoder)
# Instantiate unet adapter
config_unet = config_cls(**unet_kwargs)
model.unet = get_peft_model(model.unet, config_unet)
# Move model to device
model = model.to(self.torch_device)
return model
def prepare_inputs_for_testing(self):
return {
"prompt": "a high quality digital photo of a cute corgi",
"num_inference_steps": 3,
}
@parameterized.expand(
PeftStableDiffusionTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"loha_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
},
)
)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
# Instantiate model & adapters
model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs)
# Generate output for peft modified StableDiffusion
dummy_input = self.prepare_inputs_for_testing()
with temp_seed(seed=42):
peft_output = np.array(model(**dummy_input).images[0]).astype(np.float32)
# Merge adapter and model
if config_cls not in [LoHaConfig, OFTConfig, HRAConfig]:
# TODO: Merging the text_encoder is leading to issues on CPU with PyTorch 2.1
model.text_encoder = model.text_encoder.merge_and_unload()
model.unet = model.unet.merge_and_unload()
# Generate output for peft merged StableDiffusion
with temp_seed(seed=42):
merged_output = np.array(model(**dummy_input).images[0]).astype(np.float32)
        # Images are in the uint8 dynamic range, so use a large atol
assert np.allclose(peft_output, merged_output, atol=1.0)
@parameterized.expand(
PeftStableDiffusionTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"loha_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
},
)
)
def test_merge_layers_safe_merge(self, test_name, model_id, config_cls, config_kwargs):
# Instantiate model & adapters
model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs)
# Generate output for peft modified StableDiffusion
dummy_input = self.prepare_inputs_for_testing()
with temp_seed(seed=42):
peft_output = np.array(model(**dummy_input).images[0]).astype(np.float32)
# Merge adapter and model
if config_cls not in [LoHaConfig, OFTConfig, HRAConfig]:
# TODO: Merging the text_encoder is leading to issues on CPU with PyTorch 2.1
model.text_encoder = model.text_encoder.merge_and_unload(safe_merge=True)
model.unet = model.unet.merge_and_unload(safe_merge=True)
# Generate output for peft merged StableDiffusion
with temp_seed(seed=42):
merged_output = np.array(model(**dummy_input).images[0]).astype(np.float32)
        # Images are in the uint8 dynamic range, so use a large atol
assert np.allclose(peft_output, merged_output, atol=1.0)
@parameterized.expand(
PeftStableDiffusionTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
},
filter_params_func=lambda tests: [
x for x in tests if all(s not in x[0] for s in ["loha", "lokr", "oft", "hra"])
],
)
)
def test_add_weighted_adapter_base_unchanged(self, test_name, model_id, config_cls, config_kwargs):
# Instantiate model & adapters
model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs)
# Get current available adapter config
text_encoder_adapter_name = next(iter(model.text_encoder.peft_config.keys()))
unet_adapter_name = next(iter(model.unet.peft_config.keys()))
text_encoder_adapter_config = replace(model.text_encoder.peft_config[text_encoder_adapter_name])
unet_adapter_config = replace(model.unet.peft_config[unet_adapter_name])
# Create weighted adapters
        model.text_encoder.add_weighted_adapter([text_encoder_adapter_name], [0.5], "weighted_adapter_test")
model.unet.add_weighted_adapter([unet_adapter_name], [0.5], "weighted_adapter_test")
# Assert that base adapters config did not change
assert asdict(text_encoder_adapter_config) == asdict(model.text_encoder.peft_config[text_encoder_adapter_name])
assert asdict(unet_adapter_config) == asdict(model.unet.peft_config[unet_adapter_name])
@parameterized.expand(
PeftStableDiffusionTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"loha_kwargs": {"init_weights": [False]},
"lokr_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
},
)
)
def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_disable_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftStableDiffusionTestConfigManager.get_grid_parameters(
{"model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST},
)
)
def test_load_model_low_cpu_mem_usage(self, test_name, model_id, config_cls, config_kwargs):
# Instantiate model & adapters
pipe = self.instantiate_sd_peft(model_id, config_cls, config_kwargs)
te_state_dict = get_peft_model_state_dict(pipe.text_encoder)
unet_state_dict = get_peft_model_state_dict(pipe.unet)
del pipe
pipe = self.instantiate_sd_peft(model_id, config_cls, config_kwargs)
config_kwargs = config_kwargs.copy()
text_encoder_kwargs = config_kwargs.pop("text_encoder")
unet_kwargs = config_kwargs.pop("unet")
# the remaining config kwargs should be applied to both configs
for key, val in config_kwargs.items():
text_encoder_kwargs[key] = val
unet_kwargs[key] = val
config_text_encoder = config_cls(**text_encoder_kwargs)
config_unet = config_cls(**unet_kwargs)
# check text encoder
inject_adapter_in_model(config_text_encoder, pipe.text_encoder, low_cpu_mem_usage=True)
# sanity check that the adapter was applied:
assert any(isinstance(module, BaseTunerLayer) for module in pipe.text_encoder.modules())
assert "meta" in {p.device.type for p in pipe.text_encoder.parameters()}
set_peft_model_state_dict(pipe.text_encoder, te_state_dict, low_cpu_mem_usage=True)
assert "meta" not in {p.device.type for p in pipe.text_encoder.parameters()}
# check unet
inject_adapter_in_model(config_unet, pipe.unet, low_cpu_mem_usage=True)
# sanity check that the adapter was applied:
assert any(isinstance(module, BaseTunerLayer) for module in pipe.unet.modules())
assert "meta" in {p.device.type for p in pipe.unet.parameters()}
set_peft_model_state_dict(pipe.unet, unet_state_dict, low_cpu_mem_usage=True)
assert "meta" not in {p.device.type for p in pipe.unet.parameters()}
| peft/tests/test_stablediffusion.py/0 | {
"file_path": "peft/tests/test_stablediffusion.py",
"repo_id": "peft",
"token_count": 6040
} |
# Feature Extraction
All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification.
## Penultimate Layer Features (Pre-Classifier Features)
The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide if they want pooled or un-pooled features.
### Unpooled
There are three ways to obtain unpooled features. The final, unpooled features are sometimes referred to as the last hidden state. In `timm` this is up to and including the final normalization layer (in e.g. ViT style models) but does not include pooling / class token selection and final post-pooling layers.
Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the classifier head and global pooling for any network.
If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network.
#### forward_features()
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('xception41', pretrained=True)
>>> o = m(torch.randn(2, 3, 299, 299))
>>> print(f'Original shape: {o.shape}')
>>> o = m.forward_features(torch.randn(2, 3, 299, 299))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Unpooled shape: torch.Size([2, 2048, 10, 10])
```
#### Create with no classifier and pooling
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Unpooled shape: torch.Size([2, 2048, 7, 7])
```
#### Remove it later
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('densenet121', pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Original shape: {o.shape}')
>>> m.reset_classifier(0, '')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Unpooled shape: torch.Size([2, 1024, 7, 7])
```
#### Chaining unpooled output to classifier
The last hidden state can be fed back into the head of the model using the `forward_head()` function.
```py
>>> model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
>>> output = model.forward_features(torch.randn(2,3,256,256))
>>> print('Unpooled output shape:', output.shape)
>>> classified = model.forward_head(output)
>>> print('Classification output shape:', classified.shape)
```
Output:
```text
Unpooled output shape: torch.Size([2, 257, 512])
Classification output shape: torch.Size([2, 1000])
```
### Pooled
To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact.
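For the do-it-yourself route, a minimal sketch (assuming a convnet-style `NCHW` feature map such as ResNet-50's; token-based models like ViT would instead pool over the token dimension):
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True)
>>> feats = m.forward_features(torch.randn(2, 3, 224, 224))
>>> pooled = feats.mean(dim=(2, 3))  # global average pool over the spatial dims
>>> print(f'Manually pooled shape: {pooled.shape}')
```
Output:
```text
Manually pooled shape: torch.Size([2, 2048])
```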
#### Create with no classifier
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True, num_classes=0)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Pooled shape: {o.shape}')
```
Output:
```text
Pooled shape: torch.Size([2, 2048])
```
#### Remove it later
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('ese_vovnet19b_dw', pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Original shape: {o.shape}')
>>> m.reset_classifier(0)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Pooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Pooled shape: torch.Size([2, 1024])
```
## Multi-scale Feature Maps (Feature Pyramid)
Object detection, segmentation, keypoint estimation, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given object detection or segmentation library.
`timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels.
A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default, most models with a feature hierarchy will output up to 5 features, down to a reduction of 32. However, this varies per model: some models have fewer hierarchy levels, and some (like ViT) have a larger number of non-hierarchical feature maps and default to outputting the last 3. The `out_indices` arg can be passed to `create_model` to specify which features you want.
### Create a feature map extraction model
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnest26d', features_only=True, pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
... print(x.shape)
```
Output:
```text
torch.Size([2, 64, 112, 112])
torch.Size([2, 256, 56, 56])
torch.Size([2, 512, 28, 28])
torch.Size([2, 1024, 14, 14])
torch.Size([2, 2048, 7, 7])
```
### Query the feature information
After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points.
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('regnety_032', features_only=True, pretrained=True)
>>> print(f'Feature channels: {m.feature_info.channels()}')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
... print(x.shape)
```
Output:
```text
Feature channels: [32, 72, 216, 576, 1512]
torch.Size([2, 32, 112, 112])
torch.Size([2, 72, 56, 56])
torch.Size([2, 216, 28, 28])
torch.Size([2, 576, 14, 14])
torch.Size([2, 1512, 7, 7])
```
### Select specific feature levels or limit the stride
There are two additional creation arguments impacting the output features.
* `out_indices` selects which indices to output
* `output_stride` limits the feature output stride of the network (also works in classification mode BTW)
#### Output index selection
The `out_indices` argument is supported by all models, but not all models share the same index-to-feature-stride mapping. Look at the code or check `feature_info` to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most convnet models, index 0 is the stride 2 features and index 4 is stride 32. For many ViT or ViT-Conv hybrids there may be many (or all) feature maps of the same shape, or a combination of hierarchical and non-hierarchical feature maps. It is best to look at the `feature_info` attribute to see the number of features, their corresponding channel counts, and reduction levels.
`out_indices` supports negative indexing, which makes it easy to get the last, penultimate, etc. feature map. `out_indices=(-2,)` would return the penultimate feature map for any model.
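For example, reusing the `resnest26d` backbone from above, the last two feature maps can be requested with negative indices (the shapes follow the earlier example):
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnest26d', features_only=True, out_indices=(-2, -1), pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
...     print(x.shape)
```
Output:
```text
torch.Size([2, 1024, 14, 14])
torch.Size([2, 2048, 7, 7])
```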
#### Output stride (feature map dilation)
`output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward; some networks only support `output_stride=32`.
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True)
>>> print(f'Feature channels: {m.feature_info.channels()}')
>>> print(f'Feature reduction: {m.feature_info.reduction()}')
>>> o = m(torch.randn(2, 3, 320, 320))
>>> for x in o:
... print(x.shape)
```
Output:
```text
Feature channels: [512, 2048]
Feature reduction: [8, 8]
torch.Size([2, 512, 40, 40])
torch.Size([2, 2048, 40, 40])
```
## Flexible intermediate feature map extraction
In addition to using `features_only` with the model factory, many models support a `forward_intermediates()` method which provides a flexible mechanism for extracting both the intermediate feature maps and the last hidden state (which can be chained to the head). Additionally, this method supports model-specific features for some models, such as returning class or distill prefix tokens.
Accompanying the `forward_intermediates` function is a `prune_intermediate_layers` function that allows one to prune layers from the model, including the head, the final norm, and/or trailing blocks/stages that are not needed.
An `indices` argument is used for both `forward_intermediates()` and `prune_intermediate_layers()` to select the features to return or the layers to remove. As with `out_indices` for the `features_only` API, `indices` is model specific and selects which intermediates are returned.
In non-hierarchical, block-based models such as ViT, the indices correspond to the blocks; in models with hierarchical stages, they usually correspond to the output of the stem plus each hierarchical stage. Both positive (from the start) and negative (relative to the end) indexing works, and `None` is used to return all intermediates.
The `prune_intermediate_layers()` call returns an indices variable, as negative indices must be converted to absolute (positive) indices when the model is trimmed.
```py
model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
output, intermediates = model.forward_intermediates(torch.randn(2,3,256,256))
for i, o in enumerate(intermediates):
print(f'Feat index: {i}, shape: {o.shape}')
```
```text
Feat index: 0, shape: torch.Size([2, 512, 16, 16])
Feat index: 1, shape: torch.Size([2, 512, 16, 16])
Feat index: 2, shape: torch.Size([2, 512, 16, 16])
Feat index: 3, shape: torch.Size([2, 512, 16, 16])
Feat index: 4, shape: torch.Size([2, 512, 16, 16])
Feat index: 5, shape: torch.Size([2, 512, 16, 16])
Feat index: 6, shape: torch.Size([2, 512, 16, 16])
Feat index: 7, shape: torch.Size([2, 512, 16, 16])
Feat index: 8, shape: torch.Size([2, 512, 16, 16])
Feat index: 9, shape: torch.Size([2, 512, 16, 16])
Feat index: 10, shape: torch.Size([2, 512, 16, 16])
Feat index: 11, shape: torch.Size([2, 512, 16, 16])
```
```py
model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
print('Original params:', sum([p.numel() for p in model.parameters()]))
indices = model.prune_intermediate_layers(indices=(-2,), prune_head=True, prune_norm=True) # prune head, norm, last block
print('Pruned params:', sum([p.numel() for p in model.parameters()]))
intermediates = model.forward_intermediates(torch.randn(2,3,256,256), indices=indices, intermediates_only=True) # return penultimate intermediate
for o in intermediates:
print(f'Feat shape: {o.shape}')
```
```text
Original params: 38880232
Pruned params: 35212800
Feat shape: torch.Size([2, 512, 16, 16])
```
| pytorch-image-models/hfdocs/source/feature_extraction.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/feature_extraction.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3391
} |
# EfficientNet
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
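As a toy illustration of this rule, independent of `timm` itself: the values \\( \alpha=1.2, \beta=1.1, \gamma=1.15 \\) below are the grid-search constants reported in the paper, and real implementations additionally round channel counts to a divisor.
```py
>>> alpha, beta, gamma = 1.2, 1.1, 1.15  # grid-searched constants from the paper
>>> phi = 1  # compound coefficient; each increment targets roughly 2x the FLOPs (alpha * beta**2 * gamma**2 ~= 2)
>>> depth_mult, width_mult, res_mult = alpha ** phi, beta ** phi, gamma ** phi
>>> print(f'depth x{depth_mult:.2f}, width x{width_mult:.2f}, resolution x{res_mult:.2f}')
>>> # prints: depth x1.20, width x1.10, resolution x1.15
```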
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('efficientnet_b0', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `efficientnet_b0`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{tan2020efficientnet,
title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
author={Mingxing Tan and Quoc V. Le},
year={2020},
eprint={1905.11946},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: EfficientNet
Paper:
Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
Models:
- Name: efficientnet_b0
In Collection: EfficientNet
Metadata:
FLOPs: 511241564
Parameters: 5290000
File Size: 21376743
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b0
Layers: 18
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1002
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.71%
Top 5 Accuracy: 93.52%
- Name: efficientnet_b1
In Collection: EfficientNet
Metadata:
FLOPs: 909691920
Parameters: 7790000
File Size: 31502706
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b1
Crop Pct: '0.875'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1011
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.71%
Top 5 Accuracy: 94.15%
- Name: efficientnet_b2
In Collection: EfficientNet
Metadata:
FLOPs: 1265324514
Parameters: 9110000
File Size: 36788104
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b2
Crop Pct: '0.875'
Image Size: '260'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1020
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.38%
Top 5 Accuracy: 95.08%
- Name: efficientnet_b2a
In Collection: EfficientNet
Metadata:
FLOPs: 1452041554
Parameters: 9110000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b2a
Crop Pct: '1.0'
Image Size: '288'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1029
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.61%
Top 5 Accuracy: 95.32%
- Name: efficientnet_b3
In Collection: EfficientNet
Metadata:
FLOPs: 2327905920
Parameters: 12230000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b3
Crop Pct: '0.904'
Image Size: '300'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1038
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.08%
Top 5 Accuracy: 96.03%
- Name: efficientnet_b3a
In Collection: EfficientNet
Metadata:
FLOPs: 2600628304
Parameters: 12230000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b3a
Crop Pct: '1.0'
Image Size: '320'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1047
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.25%
Top 5 Accuracy: 96.11%
- Name: efficientnet_em
In Collection: EfficientNet
Metadata:
FLOPs: 3935516480
Parameters: 6900000
File Size: 27927309
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_em
Crop Pct: '0.882'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1118
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.26%
Top 5 Accuracy: 94.79%
- Name: efficientnet_es
In Collection: EfficientNet
Metadata:
FLOPs: 2317181824
Parameters: 5440000
File Size: 22003339
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_es
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1110
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.09%
Top 5 Accuracy: 93.93%
- Name: efficientnet_lite0
In Collection: EfficientNet
Metadata:
FLOPs: 510605024
Parameters: 4650000
File Size: 18820005
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_lite0
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1163
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.5%
Top 5 Accuracy: 92.51%
-->
| pytorch-image-models/hfdocs/source/models/efficientnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/efficientnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4914
} |
# (Legacy) SE-ResNeXt
**SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
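The squeeze-and-excitation operation itself is compact. The sketch below is a generic PyTorch rendition for illustration only; the reduction ratio and layer choices are assumptions, not the exact ones used by these checkpoints.
```py
>>> import torch
>>> import torch.nn as nn
>>> class SEBlock(nn.Module):
...     def __init__(self, channels, reduction=16):
...         super().__init__()
...         self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
...         self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)
...     def forward(self, x):
...         s = x.mean(dim=(2, 3), keepdim=True)  # squeeze: global average pool to a per-channel descriptor
...         s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))  # excitation: bottleneck MLP with sigmoid gate
...         return x * s  # recalibrate: rescale each channel of the input
>>> out = SEBlock(64)(torch.randn(2, 64, 56, 56))
>>> print(out.shape)
>>> # prints: torch.Size([2, 64, 56, 56])
```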
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `legacy_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{hu2019squeezeandexcitation,
title={Squeeze-and-Excitation Networks},
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
year={2019},
eprint={1709.01507},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Legacy SE ResNeXt
Paper:
Title: Squeeze-and-Excitation Networks
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
Models:
- Name: legacy_seresnext101_32x4d
In Collection: Legacy SE ResNeXt
Metadata:
FLOPs: 10287698672
Parameters: 48960000
File Size: 196466866
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnext101_32x4d
LR: 0.6
Epochs: 100
Layers: 101
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L462
Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.23%
Top 5 Accuracy: 95.02%
- Name: legacy_seresnext26_32x4d
In Collection: Legacy SE ResNeXt
Metadata:
FLOPs: 3187342304
Parameters: 16790000
File Size: 67346327
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnext26_32x4d
LR: 0.6
Epochs: 100
Layers: 26
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L448
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.11%
Top 5 Accuracy: 93.31%
- Name: legacy_seresnext50_32x4d
In Collection: Legacy SE ResNeXt
Metadata:
FLOPs: 5459954352
Parameters: 27560000
File Size: 110559176
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnext50_32x4d
LR: 0.6
Epochs: 100
Layers: 50
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L455
Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.08%
Top 5 Accuracy: 94.43%
--> | pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2732
} |
# (Tensorflow) MobileNet v3
**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
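Hard swish replaces the sigmoid in swish with a piecewise-linear approximation that is cheaper on mobile hardware. A minimal functional sketch is shown below (PyTorch also ships this as `nn.Hardswish`):
```py
>>> import torch
>>> import torch.nn.functional as F
>>> def hard_swish(x):
...     # h-swish(x) = x * ReLU6(x + 3) / 6, a piecewise-linear stand-in for x * sigmoid(x)
...     return x * F.relu6(x + 3.0) / 6.0
>>> x = torch.linspace(-4, 4, steps=5)
>>> print(hard_swish(x))
>>> # prints values near 0 for large negative inputs, approaching x for large positive inputs
```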
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-02244,
author = {Andrew Howard and
Mark Sandler and
Grace Chu and
Liang{-}Chieh Chen and
Bo Chen and
Mingxing Tan and
Weijun Wang and
Yukun Zhu and
Ruoming Pang and
Vijay Vasudevan and
Quoc V. Le and
Hartwig Adam},
title = {Searching for MobileNetV3},
journal = {CoRR},
volume = {abs/1905.02244},
year = {2019},
url = {http://arxiv.org/abs/1905.02244},
archivePrefix = {arXiv},
eprint = {1905.02244},
timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: TF MobileNet V3
Paper:
Title: Searching for MobileNetV3
URL: https://paperswithcode.com/paper/searching-for-mobilenetv3
Models:
- Name: tf_mobilenetv3_large_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 194323712
Parameters: 3990000
File Size: 16097377
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_075
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 73.45%
Top 5 Accuracy: 91.34%
- Name: tf_mobilenetv3_large_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 274535288
Parameters: 5480000
File Size: 22076649
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.51%
Top 5 Accuracy: 92.61%
- Name: tf_mobilenetv3_large_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 267216928
Parameters: 3920000
File Size: 15836368
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_minimal_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.24%
Top 5 Accuracy: 90.64%
- Name: tf_mobilenetv3_small_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 48457664
Parameters: 2040000
File Size: 8242701
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_075
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.72%
Top 5 Accuracy: 86.13%
- Name: tf_mobilenetv3_small_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 65450600
Parameters: 2540000
File Size: 10256398
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.92%
Top 5 Accuracy: 87.68%
- Name: tf_mobilenetv3_small_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 60827936
Parameters: 2040000
File Size: 8258083
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_minimal_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 62.91%
Top 5 Accuracy: 84.24%
--> | pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4783
} |
from torch.nn.modules.batchnorm import BatchNorm2d
from torchvision.ops.misc import FrozenBatchNorm2d
import timm
import pytest
from timm.utils.model import freeze, unfreeze
from timm.utils.model import ActivationStatsHook
from timm.utils.model import extract_spp_stats
from timm.utils.model import _freeze_unfreeze
from timm.utils.model import avg_sq_ch_mean, avg_ch_var, avg_ch_var_residual
from timm.utils.model import reparameterize_model
from timm.utils.model import get_state_dict
def test_freeze_unfreeze():
model = timm.create_model('resnet18')
# Freeze all
freeze(model)
# Check top level module
assert model.fc.weight.requires_grad == False
# Check submodule
assert model.layer1[0].conv1.weight.requires_grad == False
# Check BN
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
# Unfreeze all
unfreeze(model)
# Check top level module
assert model.fc.weight.requires_grad == True
# Check submodule
assert model.layer1[0].conv1.weight.requires_grad == True
# Check BN
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
# Freeze some
freeze(model, ['layer1', 'layer2.0'])
# Check frozen
assert model.layer1[0].conv1.weight.requires_grad == False
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
assert model.layer2[0].conv1.weight.requires_grad == False
# Check not frozen
assert model.layer3[0].conv1.weight.requires_grad == True
assert isinstance(model.layer3[0].bn1, BatchNorm2d)
assert model.layer2[1].conv1.weight.requires_grad == True
# Unfreeze some
unfreeze(model, ['layer1', 'layer2.0'])
# Check not frozen
assert model.layer1[0].conv1.weight.requires_grad == True
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
assert model.layer2[0].conv1.weight.requires_grad == True
# Freeze/unfreeze BN
# From root
freeze(model, ['layer1.0.bn1'])
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
unfreeze(model, ['layer1.0.bn1'])
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
# From direct parent
freeze(model.layer1[0], ['bn1'])
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
unfreeze(model.layer1[0], ['bn1'])
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
def test_activation_stats_hook_validation():
model = timm.create_model('resnet18')
def test_hook(model, input, output):
return output.mean().item()
# Test error case with mismatched lengths
with pytest.raises(ValueError, match="Please provide `hook_fns` for each `hook_fn_locs`"):
ActivationStatsHook(
model,
hook_fn_locs=['layer1.0.conv1', 'layer1.0.conv2'],
hook_fns=[test_hook]
)
def test_extract_spp_stats():
model = timm.create_model('resnet18')
def test_hook(model, input, output):
return output.mean().item()
stats = extract_spp_stats(
model,
hook_fn_locs=['layer1.0.conv1'],
hook_fns=[test_hook],
input_shape=[2, 3, 32, 32]
)
assert isinstance(stats, dict)
assert test_hook.__name__ in stats
assert isinstance(stats[test_hook.__name__], list)
assert len(stats[test_hook.__name__]) > 0
def test_freeze_unfreeze_bn_root():
import torch.nn as nn
from timm.layers import BatchNormAct2d
# Create batch norm layers
bn = nn.BatchNorm2d(10)
bn_act = BatchNormAct2d(10)
# Test with BatchNorm2d as root
with pytest.raises(AssertionError):
_freeze_unfreeze(bn, mode="freeze")
# Test with BatchNormAct2d as root
with pytest.raises(AssertionError):
_freeze_unfreeze(bn_act, mode="freeze")
def test_activation_stats_functions():
import torch
# Create sample input tensor [batch, channels, height, width]
x = torch.randn(2, 3, 4, 4)
# Test avg_sq_ch_mean
result1 = avg_sq_ch_mean(None, None, x)
assert isinstance(result1, float)
# Test avg_ch_var
result2 = avg_ch_var(None, None, x)
assert isinstance(result2, float)
# Test avg_ch_var_residual
result3 = avg_ch_var_residual(None, None, x)
assert isinstance(result3, float)
def test_reparameterize_model():
import torch.nn as nn
class FusableModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
def fuse(self):
return nn.Identity()
class ModelWithFusable(nn.Module):
def __init__(self):
super().__init__()
self.fusable = FusableModule()
self.normal = nn.Linear(10, 10)
model = ModelWithFusable()
# Test with inplace=False (should create a copy)
new_model = reparameterize_model(model, inplace=False)
assert isinstance(new_model.fusable, nn.Identity)
assert isinstance(model.fusable, FusableModule) # Original unchanged
# Test with inplace=True
reparameterize_model(model, inplace=True)
assert isinstance(model.fusable, nn.Identity)
def test_get_state_dict_custom_unwrap():
import torch.nn as nn
class CustomModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 10)
model = CustomModel()
def custom_unwrap(m):
return m
state_dict = get_state_dict(model, unwrap_fn=custom_unwrap)
assert 'linear.weight' in state_dict
assert 'linear.bias' in state_dict
def test_freeze_unfreeze_string_input():
model = timm.create_model('resnet18')
# Test with string input
_freeze_unfreeze(model, 'layer1', mode='freeze')
assert model.layer1[0].conv1.weight.requires_grad == False
# Test unfreezing with string input
_freeze_unfreeze(model, 'layer1', mode='unfreeze')
assert model.layer1[0].conv1.weight.requires_grad == True
| pytorch-image-models/tests/test_utils.py/0 | {
"file_path": "pytorch-image-models/tests/test_utils.py",
"repo_id": "pytorch-image-models",
"token_count": 2521
} |
""" Loader Factory, Fast Collate, CUDA Prefetcher
Prefetcher and Fast Collate inspired by NVIDIA APEX example at
https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf
Hacked together by / Copyright 2019, Ross Wightman
"""
import logging
import random
from contextlib import suppress
from functools import partial
from itertools import repeat
from typing import Callable, Optional, Tuple, Union
import torch
import torch.utils.data
import numpy as np
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .dataset import IterableImageDataset, ImageDataset
from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler
from .random_erasing import RandomErasing
from .mixup import FastCollateMixup
from .transforms_factory import create_transform
_logger = logging.getLogger(__name__)
def fast_collate(batch):
""" A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)"""
assert isinstance(batch[0], tuple)
batch_size = len(batch)
if isinstance(batch[0][0], tuple):
        # This branch 'deinterleaves' and flattens tuples of input tensors into one tensor, ordered by position,
        # such that all elements at tuple position n end up in the nth chunk of a torch.split(tensor, batch_size)
inner_tuple_size = len(batch[0][0])
flattened_batch_size = batch_size * inner_tuple_size
targets = torch.zeros(flattened_batch_size, dtype=torch.int64)
tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length
for j in range(inner_tuple_size):
targets[i + j * batch_size] = batch[i][1]
tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j])
return tensor, targets
elif isinstance(batch[0][0], np.ndarray):
targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
assert len(targets) == batch_size
tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
tensor[i] += torch.from_numpy(batch[i][0])
return tensor, targets
elif isinstance(batch[0][0], torch.Tensor):
targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
assert len(targets) == batch_size
tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
tensor[i].copy_(batch[i][0])
return tensor, targets
else:
assert False
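def _fast_collate_example():
    # Illustrative sketch (not part of the original file): fast_collate expects a list of
    # (image, target) tuples where each image is a uint8 numpy array (or torch tensor) in CHW
    # layout with identical shapes; the shapes/values below are assumptions for demonstration.
    batch = [
        (np.zeros((3, 224, 224), dtype=np.uint8), 0),
        (np.ones((3, 224, 224), dtype=np.uint8), 1),
    ]
    images, targets = fast_collate(batch)
    # images: uint8 tensor of shape (2, 3, 224, 224); targets: int64 tensor of shape (2,)
    return images, targets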
def adapt_to_chs(x, n):
if not isinstance(x, (tuple, list)):
x = tuple(repeat(x, n))
elif len(x) != n:
x_mean = np.mean(x).item()
x = (x_mean,) * n
_logger.warning(f'Pretrained mean/std different shape than model, using avg value {x}.')
else:
assert len(x) == n, 'normalization stats must match image channels'
return x
class PrefetchLoader:
def __init__(
self,
loader: torch.utils.data.DataLoader,
mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
channels: int = 3,
device: torch.device = torch.device('cuda'),
img_dtype: Optional[torch.dtype] = None,
fp16: bool = False,
re_prob: float = 0.,
re_mode: str = 'const',
re_count: int = 1,
re_num_splits: int = 0,
):
mean = adapt_to_chs(mean, channels)
std = adapt_to_chs(std, channels)
normalization_shape = (1, channels, 1, 1)
self.loader = loader
self.device = device
if fp16:
# fp16 arg is deprecated, but will override dtype arg if set for bwd compat
img_dtype = torch.float16
self.img_dtype = img_dtype or torch.float32
self.mean = torch.tensor(
[x * 255 for x in mean], device=device, dtype=img_dtype).view(normalization_shape)
self.std = torch.tensor(
[x * 255 for x in std], device=device, dtype=img_dtype).view(normalization_shape)
if re_prob > 0.:
self.random_erasing = RandomErasing(
probability=re_prob,
mode=re_mode,
max_count=re_count,
num_splits=re_num_splits,
device=device,
)
else:
self.random_erasing = None
self.is_cuda = device.type == 'cuda' and torch.cuda.is_available()
self.is_npu = device.type == 'npu' and torch.npu.is_available()
def __iter__(self):
first = True
if self.is_cuda:
stream = torch.cuda.Stream()
stream_context = partial(torch.cuda.stream, stream=stream)
elif self.is_npu:
stream = torch.npu.Stream()
stream_context = partial(torch.npu.stream, stream=stream)
else:
stream = None
stream_context = suppress
for next_input, next_target in self.loader:
with stream_context():
next_input = next_input.to(device=self.device, non_blocking=True)
next_target = next_target.to(device=self.device, non_blocking=True)
next_input = next_input.to(self.img_dtype).sub_(self.mean).div_(self.std)
if self.random_erasing is not None:
next_input = self.random_erasing(next_input)
if not first:
yield input, target
else:
first = False
if stream is not None:
if self.is_cuda:
torch.cuda.current_stream().wait_stream(stream)
elif self.is_npu:
torch.npu.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __len__(self):
return len(self.loader)
@property
def sampler(self):
return self.loader.sampler
@property
def dataset(self):
return self.loader.dataset
@property
def mixup_enabled(self):
if isinstance(self.loader.collate_fn, FastCollateMixup):
return self.loader.collate_fn.mixup_enabled
else:
return False
@mixup_enabled.setter
def mixup_enabled(self, x):
if isinstance(self.loader.collate_fn, FastCollateMixup):
self.loader.collate_fn.mixup_enabled = x
def _worker_init(worker_id, worker_seeding='all'):
worker_info = torch.utils.data.get_worker_info()
assert worker_info.id == worker_id
if isinstance(worker_seeding, Callable):
seed = worker_seeding(worker_info)
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed % (2 ** 32 - 1))
else:
assert worker_seeding in ('all', 'part')
# random / torch seed already called in dataloader iter class w/ worker_info.seed
# to reproduce some old results (same seed + hparam combo), partial seeding is required (skip numpy re-seed)
if worker_seeding == 'all':
np.random.seed(worker_info.seed % (2 ** 32 - 1))
def create_loader(
dataset: Union[ImageDataset, IterableImageDataset],
input_size: Union[int, Tuple[int, int], Tuple[int, int, int]],
batch_size: int,
is_training: bool = False,
no_aug: bool = False,
re_prob: float = 0.,
re_mode: str = 'const',
re_count: int = 1,
re_split: bool = False,
train_crop_mode: Optional[str] = None,
scale: Optional[Tuple[float, float]] = None,
ratio: Optional[Tuple[float, float]] = None,
hflip: float = 0.5,
vflip: float = 0.,
color_jitter: float = 0.4,
color_jitter_prob: Optional[float] = None,
grayscale_prob: float = 0.,
gaussian_blur_prob: float = 0.,
auto_augment: Optional[str] = None,
num_aug_repeats: int = 0,
num_aug_splits: int = 0,
interpolation: str = 'bilinear',
mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
num_workers: int = 1,
distributed: bool = False,
crop_pct: Optional[float] = None,
crop_mode: Optional[str] = None,
crop_border_pixels: Optional[int] = None,
collate_fn: Optional[Callable] = None,
pin_memory: bool = False,
fp16: bool = False, # deprecated, use img_dtype
img_dtype: torch.dtype = torch.float32,
device: torch.device = torch.device('cuda'),
use_prefetcher: bool = True,
use_multi_epochs_loader: bool = False,
persistent_workers: bool = True,
worker_seeding: str = 'all',
tf_preprocessing: bool = False,
):
"""
Args:
dataset: The image dataset to load.
input_size: Target input size (channels, height, width) tuple or size scalar.
batch_size: Number of samples in a batch.
is_training: Return training (random) transforms.
no_aug: Disable augmentation for training (useful for debug).
re_prob: Random erasing probability.
re_mode: Random erasing fill mode.
re_count: Number of random erasing regions.
re_split: Control split of random erasing across batch size.
scale: Random resize scale range (crop area, < 1.0 => zoom in).
ratio: Random aspect ratio range (crop ratio for RRC, ratio adjustment factor for RKR).
hflip: Horizontal flip probability.
vflip: Vertical flip probability.
color_jitter: Random color jitter component factors (brightness, contrast, saturation, hue).
Scalar is applied as (scalar,) * 3 (no hue).
        color_jitter_prob: Apply color jitter with this probability if not None (for SimCLR-like aug).
grayscale_prob: Probability of converting image to grayscale (for SimCLR-like aug).
gaussian_blur_prob: Probability of applying gaussian blur (for SimCLR-like aug).
auto_augment: Auto augment configuration string (see auto_augment.py).
num_aug_repeats: Enable special sampler to repeat same augmentation across distributed GPUs.
num_aug_splits: Enable mode where augmentations can be split across the batch.
interpolation: Image interpolation mode.
mean: Image normalization mean.
std: Image normalization standard deviation.
num_workers: Num worker processes per DataLoader.
distributed: Enable dataloading for distributed training.
crop_pct: Inference crop percentage (output size / resize size).
crop_mode: Inference crop mode. One of ['squash', 'border', 'center']. Defaults to 'center' when None.
crop_border_pixels: Inference crop border of specified # pixels around edge of original image.
collate_fn: Override default collate_fn.
pin_memory: Pin memory for device transfer.
fp16: Deprecated argument for half-precision input dtype. Use img_dtype.
img_dtype: Data type for input image.
device: Device to transfer inputs and targets to.
use_prefetcher: Use efficient pre-fetcher to load samples onto device.
        use_multi_epochs_loader: Use a loader that reuses worker processes across epochs (see MultiEpochsDataLoader).
persistent_workers: Enable persistent worker processes.
worker_seeding: Control worker random seeding at init.
tf_preprocessing: Use TF 1.0 inference preprocessing for testing model ports.
Returns:
DataLoader
"""
re_num_splits = 0
if re_split:
# apply RE to second half of batch if no aug split otherwise line up with aug split
re_num_splits = num_aug_splits or 2
dataset.transform = create_transform(
input_size,
is_training=is_training,
no_aug=no_aug,
train_crop_mode=train_crop_mode,
scale=scale,
ratio=ratio,
hflip=hflip,
vflip=vflip,
color_jitter=color_jitter,
color_jitter_prob=color_jitter_prob,
grayscale_prob=grayscale_prob,
gaussian_blur_prob=gaussian_blur_prob,
auto_augment=auto_augment,
interpolation=interpolation,
mean=mean,
std=std,
crop_pct=crop_pct,
crop_mode=crop_mode,
crop_border_pixels=crop_border_pixels,
re_prob=re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits,
tf_preprocessing=tf_preprocessing,
use_prefetcher=use_prefetcher,
separate=num_aug_splits > 0,
)
if isinstance(dataset, IterableImageDataset):
# give Iterable datasets early knowledge of num_workers so that sample estimates
# are correct before worker processes are launched
dataset.set_loader_cfg(num_workers=num_workers)
sampler = None
if distributed and not isinstance(dataset, torch.utils.data.IterableDataset):
if is_training:
if num_aug_repeats:
sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats)
else:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
# This will add extra duplicate entries to result in equal num
# of samples per-process, will slightly alter validation results
sampler = OrderedDistributedSampler(dataset)
else:
assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use"
if collate_fn is None:
collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate
loader_class = torch.utils.data.DataLoader
if use_multi_epochs_loader:
loader_class = MultiEpochsDataLoader
loader_args = dict(
batch_size=batch_size,
shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training,
num_workers=num_workers,
sampler=sampler,
collate_fn=collate_fn,
pin_memory=pin_memory,
drop_last=is_training,
worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding),
persistent_workers=persistent_workers
)
try:
loader = loader_class(dataset, **loader_args)
    except TypeError:
        loader_args.pop('persistent_workers')  # persistent_workers arg only in PyTorch 1.7+
loader = loader_class(dataset, **loader_args)
if use_prefetcher:
prefetch_re_prob = re_prob if is_training and not no_aug else 0.
loader = PrefetchLoader(
loader,
mean=mean,
std=std,
channels=input_size[0],
device=device,
fp16=fp16, # deprecated, use img_dtype
img_dtype=img_dtype,
re_prob=prefetch_re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits
)
return loader
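def _create_loader_example(dataset):
    # Illustrative sketch (not part of the original file): a minimal CPU training loader that
    # skips the device prefetcher. `dataset` is assumed to be an ImageDataset / IterableImageDataset
    # built elsewhere (e.g. via timm.data.create_dataset); all argument values are assumptions.
    return create_loader(
        dataset,
        input_size=(3, 224, 224),
        batch_size=32,
        is_training=True,
        num_workers=2,
        device=torch.device('cpu'),
        use_prefetcher=False,
    )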
class MultiEpochsDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._DataLoader__initialized = False
if self.batch_sampler is None:
self.sampler = _RepeatSampler(self.sampler)
else:
self.batch_sampler = _RepeatSampler(self.batch_sampler)
self._DataLoader__initialized = True
self.iterator = super().__iter__()
def __len__(self):
return len(self.sampler) if self.batch_sampler is None else len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
| pytorch-image-models/timm/data/loader.py/0 | {
"file_path": "pytorch-image-models/timm/data/loader.py",
"repo_id": "pytorch-image-models",
"token_count": 7038
} |
""" Real labels evaluator for ImageNet
Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159
Based on Numpy example at https://github.com/google-research/reassessed-imagenet
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import json
import numpy as np
import pkgutil
class RealLabelsImagenet:
def __init__(self, filenames, real_json=None, topk=(1, 5)):
if real_json is not None:
with open(real_json) as real_labels:
real_labels = json.load(real_labels)
else:
real_labels = json.loads(
pkgutil.get_data(__name__, os.path.join('_info', 'imagenet_real_labels.json')).decode('utf-8'))
real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)}
self.real_labels = real_labels
self.filenames = filenames
assert len(self.filenames) == len(self.real_labels)
self.topk = topk
self.is_correct = {k: [] for k in topk}
self.sample_idx = 0
def add_result(self, output):
maxk = max(self.topk)
_, pred_batch = output.topk(maxk, 1, True, True)
pred_batch = pred_batch.cpu().numpy()
for pred in pred_batch:
filename = self.filenames[self.sample_idx]
filename = os.path.basename(filename)
if self.real_labels[filename]:
for k in self.topk:
self.is_correct[k].append(
any([p in self.real_labels[filename] for p in pred[:k]]))
self.sample_idx += 1
def get_accuracy(self, k=None):
if k is None:
return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk}
else:
return float(np.mean(self.is_correct[k])) * 100
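def _real_labels_example(filenames, logits):
    # Illustrative sketch (not part of the original file): accumulate ImageNet "Real" label
    # accuracy for one batch of predictions. `filenames` is assumed to be the full, ordered
    # ImageNet-1k validation file list and `logits` a [batch, 1000] output tensor aligned with
    # the first `batch` files; both are assumptions for demonstration.
    real_labels = RealLabelsImagenet(filenames, topk=(1, 5))
    real_labels.add_result(logits)
    return real_labels.get_accuracy()  # e.g. {1: <top-1 %>, 5: <top-5 %>}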
| pytorch-image-models/timm/data/real_labels.py/0 | {
"file_path": "pytorch-image-models/timm/data/real_labels.py",
"repo_id": "pytorch-image-models",
"token_count": 854
} |
""" Model / Layer Config singleton state
"""
import os
import warnings
from typing import Any, Optional
import torch
__all__ = [
'is_exportable', 'is_scriptable', 'is_no_jit', 'use_fused_attn',
'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config', 'set_fused_attn',
'set_reentrant_ckpt', 'use_reentrant_ckpt'
]
# Set to True if prefer to have layers with no jit optimization (includes activations)
_NO_JIT = False
# Set to True if prefer to have activation layers with no jit optimization
# NOTE: not currently used, since there is no difference between no_jit and no_activation_jit while the
# only layers obeying the jit flags are activations. This will change as more layers are updated and/or added.
_NO_ACTIVATION_JIT = False
# Set to True if exporting a model with Same padding via ONNX
_EXPORTABLE = False
# Set to True if wanting to use torch.jit.script on a model
_SCRIPTABLE = False
# use torch.scaled_dot_product_attention where possible
_HAS_FUSED_ATTN = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
if 'TIMM_FUSED_ATTN' in os.environ:
_USE_FUSED_ATTN = int(os.environ['TIMM_FUSED_ATTN'])
else:
_USE_FUSED_ATTN = 1 # 0 == off, 1 == on (for tested use), 2 == on (for experimental use)
if 'TIMM_REENTRANT_CKPT' in os.environ:
    # parse as int so that e.g. TIMM_REENTRANT_CKPT=0 disables it (bool of any non-empty string is True)
    _USE_REENTRANT_CKPT = bool(int(os.environ['TIMM_REENTRANT_CKPT']))
else:
_USE_REENTRANT_CKPT = False # defaults to disabled (off)
def is_no_jit():
return _NO_JIT
class set_no_jit:
def __init__(self, mode: bool) -> None:
global _NO_JIT
self.prev = _NO_JIT
_NO_JIT = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _NO_JIT
_NO_JIT = self.prev
return False
def is_exportable():
return _EXPORTABLE
class set_exportable:
def __init__(self, mode: bool) -> None:
global _EXPORTABLE
self.prev = _EXPORTABLE
_EXPORTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _EXPORTABLE
_EXPORTABLE = self.prev
return False
def is_scriptable():
return _SCRIPTABLE
class set_scriptable:
def __init__(self, mode: bool) -> None:
global _SCRIPTABLE
self.prev = _SCRIPTABLE
_SCRIPTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
_SCRIPTABLE = self.prev
return False
class set_layer_config:
""" Layer config context manager that allows setting all layer config flags at once.
If a flag arg is None, it will not change the current value.
"""
def __init__(
self,
scriptable: Optional[bool] = None,
exportable: Optional[bool] = None,
no_jit: Optional[bool] = None,
no_activation_jit: Optional[bool] = None):
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
if scriptable is not None:
_SCRIPTABLE = scriptable
if exportable is not None:
_EXPORTABLE = exportable
if no_jit is not None:
_NO_JIT = no_jit
if no_activation_jit is not None:
_NO_ACTIVATION_JIT = no_activation_jit
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev
return False
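def _set_layer_config_example():
    # Illustrative sketch (not part of the original file): flags set on entry to the
    # context manager are restored when it exits.
    with set_layer_config(scriptable=True, exportable=True, no_jit=True):
        assert is_scriptable() and is_exportable() and is_no_jit()
    return is_scriptable(), is_exportable(), is_no_jit()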
def use_fused_attn(experimental: bool = False) -> bool:
# NOTE: ONNX export cannot handle F.scaled_dot_product_attention as of pytorch 2.0
if not _HAS_FUSED_ATTN or _EXPORTABLE:
return False
if experimental:
return _USE_FUSED_ATTN > 1
return _USE_FUSED_ATTN > 0
def set_fused_attn(enable: bool = True, experimental: bool = False):
global _USE_FUSED_ATTN
if not _HAS_FUSED_ATTN:
warnings.warn('This version of pytorch does not have F.scaled_dot_product_attention, fused_attn flag ignored.')
return
if experimental and enable:
_USE_FUSED_ATTN = 2
elif enable:
_USE_FUSED_ATTN = 1
else:
_USE_FUSED_ATTN = 0
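def _set_fused_attn_example():
    # Illustrative sketch (not part of the original file): toggle use of
    # F.scaled_dot_product_attention at runtime (a no-op with a warning on older PyTorch).
    set_fused_attn(False)
    assert not use_fused_attn()
    set_fused_attn(True)
    return use_fused_attn()  # True when scaled_dot_product_attention is available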
def use_reentrant_ckpt() -> bool:
return _USE_REENTRANT_CKPT
def set_reentrant_ckpt(enable: bool = True):
global _USE_REENTRANT_CKPT
_USE_REENTRANT_CKPT = enable
| pytorch-image-models/timm/layers/config.py/0 | {
"file_path": "pytorch-image-models/timm/layers/config.py",
"repo_id": "pytorch-image-models",
"token_count": 1974
} |
from typing import Tuple
import torch
def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]:
"""generate N-D grid in dimension order.
The ndgrid function is like meshgrid except that the order of the first two input arguments are switched.
That is, the statement
[X1,X2,X3] = ndgrid(x1,x2,x3)
produces the same result as
[X2,X1,X3] = meshgrid(x2,x1,x3)
This naming is based on MATLAB, the purpose is to avoid confusion due to torch's change to make
torch.meshgrid behaviour move from matching ndgrid ('ij') indexing to numpy meshgrid defaults of ('xy').
"""
try:
return torch.meshgrid(*tensors, indexing='ij')
except TypeError:
# old PyTorch < 1.10 will follow this path as it does not have indexing arg,
# the old behaviour of meshgrid was 'ij'
return torch.meshgrid(*tensors)
def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]:
"""generate N-D grid in spatial dim order.
The meshgrid function is similar to ndgrid except that the order of the
first two input and output arguments is switched.
That is, the statement
[X,Y,Z] = meshgrid(x,y,z)
produces the same result as
[Y,X,Z] = ndgrid(y,x,z)
Because of this, meshgrid is better suited to problems in two- or three-dimensional Cartesian space,
while ndgrid is better suited to multidimensional problems that aren't spatially based.
"""
# NOTE: this will throw in PyTorch < 1.10 as meshgrid did not support indexing arg or have
# capability of generating grid in xy order before then.
return torch.meshgrid(*tensors, indexing='xy')
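def _grid_indexing_example():
    # Illustrative sketch (not part of the original file): both helpers return grids of the
    # same shape here, but ndgrid varies its first argument along dim 0 ('ij' indexing) while
    # meshgrid varies its first argument along dim 1 ('xy' indexing).
    y, x = torch.arange(2), torch.arange(3)
    gy, gx = ndgrid(y, x)      # each of shape (2, 3)
    gx2, gy2 = meshgrid(x, y)  # each of shape (2, 3)
    return gy.shape, gx2.shape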
| pytorch-image-models/timm/layers/grid.py/0 | {
"file_path": "pytorch-image-models/timm/layers/grid.py",
"repo_id": "pytorch-image-models",
"token_count": 565
} |
""" Normalization + Activation Layers
Provides Norm+Act fns for standard PyTorch norm layers such as
* BatchNorm
* GroupNorm
* LayerNorm
This allows swapping with alternative layers that are natively both norm + act such as
* EvoNorm (evo_norm.py)
* FilterResponseNorm (filter_response_norm.py)
* InplaceABN (inplace_abn.py)
Hacked together by / Copyright 2022 Ross Wightman
"""
from typing import Union, List, Optional, Any
import torch
from torch import nn as nn
from torch.nn import functional as F
from torchvision.ops.misc import FrozenBatchNorm2d
from .create_act import create_act_layer
from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm
from .trace_utils import _assert
def _create_act(act_layer, act_kwargs=None, inplace=False, apply_act=True):
act_kwargs = act_kwargs or {}
act_kwargs.setdefault('inplace', inplace)
act = None
if apply_act:
act = create_act_layer(act_layer, **act_kwargs)
return nn.Identity() if act is None else act
class BatchNormAct2d(nn.BatchNorm2d):
"""BatchNorm + Activation
This module performs BatchNorm + Activation in a manner that will remain backwards
compatible with weights trained with separate bn, act. This is why we inherit from BN
instead of composing it as a .bn member.
"""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
apply_act=True,
act_layer=nn.ReLU,
act_kwargs=None,
inplace=True,
drop_layer=None,
device=None,
dtype=None,
):
try:
factory_kwargs = {'device': device, 'dtype': dtype}
super(BatchNormAct2d, self).__init__(
num_features,
eps=eps,
momentum=momentum,
affine=affine,
track_running_stats=track_running_stats,
**factory_kwargs,
)
except TypeError:
# NOTE for backwards compat with old PyTorch w/o factory device/dtype support
super(BatchNormAct2d, self).__init__(
num_features,
eps=eps,
momentum=momentum,
affine=affine,
track_running_stats=track_running_stats,
)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
def forward(self, x):
# cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing
_assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)')
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None: # type: ignore[has-type]
self.num_batches_tracked.add_(1) # type: ignore[has-type]
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
x = F.batch_norm(
x,
# If buffers are not to be tracked, ensure that they won't be updated
self.running_mean if not self.training or self.track_running_stats else None,
self.running_var if not self.training or self.track_running_stats else None,
self.weight,
self.bias,
bn_training,
exponential_average_factor,
self.eps,
)
x = self.drop(x)
x = self.act(x)
return x
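def _batch_norm_act_example():
    # Illustrative sketch (not part of the original file): norm + activation fused into one
    # module while keeping state_dict keys compatible with a plain nn.BatchNorm2d.
    layer = BatchNormAct2d(8, act_layer=nn.ReLU)
    x = torch.randn(2, 8, 16, 16)
    return layer(x).shape  # torch.Size([2, 8, 16, 16])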
class SyncBatchNormAct(nn.SyncBatchNorm):
# Thanks to Selim Seferbekov (https://github.com/rwightman/pytorch-image-models/issues/1254)
# This is a quick workaround to support SyncBatchNorm for timm BatchNormAct2d layers
# but ONLY when used in conjunction with the timm conversion function below.
# Do not create this module directly or use the PyTorch conversion function.
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = super().forward(x) # SyncBN doesn't work with torchscript anyways, so this is fine
if hasattr(self, "drop"):
x = self.drop(x)
if hasattr(self, "act"):
x = self.act(x)
return x
def convert_sync_batchnorm(module, process_group=None):
# convert both BatchNorm and BatchNormAct layers to Synchronized variants
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
if isinstance(module, BatchNormAct2d):
# convert timm norm + act layer
module_output = SyncBatchNormAct(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
process_group=process_group,
)
# set act and drop attr from the original module
module_output.act = module.act
module_output.drop = module.drop
else:
# convert standard BatchNorm layers
module_output = torch.nn.SyncBatchNorm(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
process_group,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
module_output.training = module.training
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, convert_sync_batchnorm(child, process_group))
del module
return module_output
class FrozenBatchNormAct2d(torch.nn.Module):
"""
BatchNormAct2d where the batch statistics and the affine parameters are fixed
Args:
num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
eps (float): a value added to the denominator for numerical stability. Default: 1e-5
"""
def __init__(
self,
num_features: int,
eps: float = 1e-5,
apply_act=True,
act_layer=nn.ReLU,
act_kwargs=None,
inplace=True,
drop_layer=None,
):
super().__init__()
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features))
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
def _load_from_state_dict(
self,
state_dict: dict,
prefix: str,
local_metadata: dict,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
scale = w * (rv + self.eps).rsqrt()
bias = b - rm * scale
x = x * scale + bias
x = self.act(self.drop(x))
return x
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps}, act={self.act})"
def freeze_batch_norm_2d(module):
"""
    Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct` layers
of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively.
Args:
module (torch.nn.Module): Any PyTorch module.
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
if isinstance(module, (BatchNormAct2d, SyncBatchNormAct)):
res = FrozenBatchNormAct2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
res.drop = module.drop
res.act = module.act
elif isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
res = FrozenBatchNorm2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = freeze_batch_norm_2d(child)
if new_child is not child:
res.add_module(name, new_child)
return res
def unfreeze_batch_norm_2d(module):
"""
    Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself an instance
of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked
recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
if isinstance(module, FrozenBatchNormAct2d):
res = BatchNormAct2d(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
res.drop = module.drop
res.act = module.act
elif isinstance(module, FrozenBatchNorm2d):
res = torch.nn.BatchNorm2d(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = unfreeze_batch_norm_2d(child)
if new_child is not child:
res.add_module(name, new_child)
return res
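def _freeze_unfreeze_bn_example():
    # Illustrative sketch (not part of the original file): freeze converts BatchNorm2d layers
    # to FrozenBatchNorm2d (fixed stats/affine), unfreeze converts them back.
    m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    frozen = freeze_batch_norm_2d(m)
    assert isinstance(frozen[1], FrozenBatchNorm2d)
    thawed = unfreeze_batch_norm_2d(frozen)
    assert isinstance(thawed[1], nn.BatchNorm2d)
    return thawed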
def _num_groups(num_channels, num_groups, group_size):
if group_size:
assert num_channels % group_size == 0
return num_channels // group_size
return num_groups
class GroupNormAct(nn.GroupNorm):
# NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args
def __init__(
self,
num_channels,
num_groups=32,
eps=1e-5,
affine=True,
group_size=None,
apply_act=True,
act_layer=nn.ReLU,
act_kwargs=None,
inplace=True,
drop_layer=None,
):
super(GroupNormAct, self).__init__(
_num_groups(num_channels, num_groups, group_size),
num_channels,
eps=eps,
affine=affine,
)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
self._fast_norm = is_fast_norm()
def forward(self, x):
if self._fast_norm:
x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
else:
x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
x = self.drop(x)
x = self.act(x)
return x
class GroupNorm1Act(nn.GroupNorm):
def __init__(
self,
num_channels,
eps=1e-5,
affine=True,
apply_act=True,
act_layer=nn.ReLU,
act_kwargs=None,
inplace=True,
drop_layer=None,
):
super(GroupNorm1Act, self).__init__(1, num_channels, eps=eps, affine=affine)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
self._fast_norm = is_fast_norm()
def forward(self, x):
if self._fast_norm:
x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
else:
x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
x = self.drop(x)
x = self.act(x)
return x
class LayerNormAct(nn.LayerNorm):
def __init__(
self,
normalization_shape: Union[int, List[int], torch.Size],
eps=1e-5,
affine=True,
apply_act=True,
act_layer=nn.ReLU,
act_kwargs=None,
inplace=True,
drop_layer=None,
):
super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
self._fast_norm = is_fast_norm()
def forward(self, x):
if self._fast_norm:
x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
else:
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
x = self.drop(x)
x = self.act(x)
return x
class LayerNormAct2d(nn.LayerNorm):
def __init__(
self,
num_channels,
eps=1e-5,
affine=True,
apply_act=True,
act_layer=nn.ReLU,
act_kwargs=None,
inplace=True,
drop_layer=None,
):
super(LayerNormAct2d, self).__init__(num_channels, eps=eps, elementwise_affine=affine)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
self._fast_norm = is_fast_norm()
def forward(self, x):
x = x.permute(0, 2, 3, 1)
if self._fast_norm:
x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
else:
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
x = x.permute(0, 3, 1, 2)
x = self.drop(x)
x = self.act(x)
return x
| pytorch-image-models/timm/layers/norm_act.py/0 | {
"file_path": "pytorch-image-models/timm/layers/norm_act.py",
"repo_id": "pytorch-image-models",
"token_count": 7990
} |
try:
from torch import _assert
except ImportError:
def _assert(condition: bool, message: str):
assert condition, message
def _float_to_int(x: float) -> int:
"""
Symbolic tracing helper to substitute for inbuilt `int`.
Hint: Inbuilt `int` can't accept an argument of type `Proxy`
"""
return int(x)
| pytorch-image-models/timm/layers/trace_utils.py/0 | {
"file_path": "pytorch-image-models/timm/layers/trace_utils.py",
"repo_id": "pytorch-image-models",
"token_count": 119
} |
import hashlib
import json
import logging
import os
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Iterable, List, Optional, Tuple, Union
import torch
from torch.hub import HASH_REGEX, download_url_to_file, urlparse
try:
from torch.hub import get_dir
except ImportError:
from torch.hub import _get_torch_home as get_dir
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from timm import __version__
from timm.models._pretrained import filter_pretrained_cfg
try:
from huggingface_hub import (
create_repo, get_hf_file_metadata,
hf_hub_download, hf_hub_url,
repo_type_and_id_from_hf_id, upload_folder)
from huggingface_hub.utils import EntryNotFoundError
hf_hub_download = partial(hf_hub_download, library_name="timm", library_version=__version__)
_has_hf_hub = True
except ImportError:
hf_hub_download = None
_has_hf_hub = False
_logger = logging.getLogger(__name__)
__all__ = ['get_cache_dir', 'download_cached_file', 'has_hf_hub', 'hf_split', 'load_model_config_from_hf',
'load_state_dict_from_hf', 'save_for_hf', 'push_to_hf_hub']
# Default name for a weights file hosted on the Huggingface Hub.
HF_WEIGHTS_NAME = "pytorch_model.bin" # default pytorch pkl
HF_SAFE_WEIGHTS_NAME = "model.safetensors" # safetensors version
HF_OPEN_CLIP_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl
HF_OPEN_CLIP_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version
def get_cache_dir(child_dir: str = ''):
"""
Returns the location of the directory where models are cached (and creates it if necessary).
"""
# Issue warning to move data if old env is set
if os.getenv('TORCH_MODEL_ZOO'):
_logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')
hub_dir = get_dir()
child_dir = () if not child_dir else (child_dir,)
model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir)
os.makedirs(model_dir, exist_ok=True)
return model_dir
def download_cached_file(
url: Union[str, List[str], Tuple[str, str]],
check_hash: bool = True,
progress: bool = False,
cache_dir: Optional[Union[str, Path]] = None,
):
if isinstance(url, (list, tuple)):
url, filename = url
else:
parts = urlparse(url)
filename = os.path.basename(parts.path)
if cache_dir:
os.makedirs(cache_dir, exist_ok=True)
else:
cache_dir = get_cache_dir()
cached_file = os.path.join(cache_dir, filename)
if not os.path.exists(cached_file):
_logger.info('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = None
if check_hash:
r = HASH_REGEX.search(filename) # r is Optional[Match[str]]
hash_prefix = r.group(1) if r else None
download_url_to_file(url, cached_file, hash_prefix, progress=progress)
return cached_file
def check_cached_file(
url: Union[str, List[str], Tuple[str, str]],
check_hash: bool = True,
cache_dir: Optional[Union[str, Path]] = None,
):
if isinstance(url, (list, tuple)):
url, filename = url
else:
parts = urlparse(url)
filename = os.path.basename(parts.path)
if not cache_dir:
cache_dir = get_cache_dir()
cached_file = os.path.join(cache_dir, filename)
if os.path.exists(cached_file):
if check_hash:
r = HASH_REGEX.search(filename) # r is Optional[Match[str]]
hash_prefix = r.group(1) if r else None
if hash_prefix:
with open(cached_file, 'rb') as f:
hd = hashlib.sha256(f.read()).hexdigest()
if hd[:len(hash_prefix)] != hash_prefix:
return False
return True
return False
def has_hf_hub(necessary: bool = False):
if not _has_hf_hub and necessary:
# if no HF Hub module installed, and it is necessary to continue, raise error
raise RuntimeError(
'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
return _has_hf_hub
def hf_split(hf_id: str):
    # FIXME: may change '@' -> '#' so the revision is parsed as a fragment in a URI-style model name scheme
rev_split = hf_id.split('@')
assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.'
hf_model_id = rev_split[0]
hf_revision = rev_split[-1] if len(rev_split) > 1 else None
return hf_model_id, hf_revision
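def _hf_split_example():
    # Illustrative sketch (not part of the original file): an optional '@revision' suffix is
    # split off a hub model id; the model id used here is just an example value.
    assert hf_split('timm/resnet50.a1_in1k') == ('timm/resnet50.a1_in1k', None)
    assert hf_split('timm/resnet50.a1_in1k@main') == ('timm/resnet50.a1_in1k', 'main')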
def load_cfg_from_json(json_file: Union[str, Path]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def download_from_hf(
model_id: str,
filename: str,
cache_dir: Optional[Union[str, Path]] = None,
):
hf_model_id, hf_revision = hf_split(model_id)
return hf_hub_download(
hf_model_id,
filename,
revision=hf_revision,
cache_dir=cache_dir,
)
def load_model_config_from_hf(
model_id: str,
cache_dir: Optional[Union[str, Path]] = None,
):
assert has_hf_hub(True)
cached_file = download_from_hf(model_id, 'config.json', cache_dir=cache_dir)
hf_config = load_cfg_from_json(cached_file)
if 'pretrained_cfg' not in hf_config:
# old form, pull pretrain_cfg out of the base dict
pretrained_cfg = hf_config
hf_config = {}
hf_config['architecture'] = pretrained_cfg.pop('architecture')
hf_config['num_features'] = pretrained_cfg.pop('num_features', None)
if 'labels' in pretrained_cfg: # deprecated name for 'label_names'
pretrained_cfg['label_names'] = pretrained_cfg.pop('labels')
hf_config['pretrained_cfg'] = pretrained_cfg
# NOTE currently discarding parent config as only arch name and pretrained_cfg used in timm right now
pretrained_cfg = hf_config['pretrained_cfg']
pretrained_cfg['hf_hub_id'] = model_id # insert hf_hub id for pretrained weight load during model creation
pretrained_cfg['source'] = 'hf-hub'
    # model should be created with base config num_classes if it exists
if 'num_classes' in hf_config:
pretrained_cfg['num_classes'] = hf_config['num_classes']
# label meta-data in base config overrides saved pretrained_cfg on load
if 'label_names' in hf_config:
pretrained_cfg['label_names'] = hf_config.pop('label_names')
if 'label_descriptions' in hf_config:
pretrained_cfg['label_descriptions'] = hf_config.pop('label_descriptions')
model_args = hf_config.get('model_args', {})
model_name = hf_config['architecture']
return pretrained_cfg, model_name, model_args
def load_state_dict_from_hf(
model_id: str,
filename: str = HF_WEIGHTS_NAME,
weights_only: bool = False,
cache_dir: Optional[Union[str, Path]] = None,
):
assert has_hf_hub(True)
hf_model_id, hf_revision = hf_split(model_id)
# Look for .safetensors alternatives and load from it if it exists
if _has_safetensors:
for safe_filename in _get_safe_alternatives(filename):
try:
cached_safe_file = hf_hub_download(
repo_id=hf_model_id,
filename=safe_filename,
revision=hf_revision,
cache_dir=cache_dir,
)
_logger.info(
f"[{model_id}] Safe alternative available for '{filename}' "
f"(as '{safe_filename}'). Loading weights using safetensors.")
return safetensors.torch.load_file(cached_safe_file, device="cpu")
except EntryNotFoundError:
pass
# Otherwise, load using pytorch.load
cached_file = hf_hub_download(
hf_model_id,
filename=filename,
revision=hf_revision,
cache_dir=cache_dir,
)
_logger.debug(f"[{model_id}] Safe alternative not found for '{filename}'. Loading weights using default pytorch.")
try:
state_dict = torch.load(cached_file, map_location='cpu', weights_only=weights_only)
except TypeError:
state_dict = torch.load(cached_file, map_location='cpu')
return state_dict
def load_custom_from_hf(
model_id: str,
filename: str,
model: torch.nn.Module,
cache_dir: Optional[Union[str, Path]] = None,
):
assert has_hf_hub(True)
hf_model_id, hf_revision = hf_split(model_id)
cached_file = hf_hub_download(
hf_model_id,
filename=filename,
revision=hf_revision,
cache_dir=cache_dir,
)
return model.load_pretrained(cached_file)
def save_config_for_hf(
model: torch.nn.Module,
config_path: str,
model_config: Optional[dict] = None,
model_args: Optional[dict] = None
):
model_config = model_config or {}
hf_config = {}
pretrained_cfg = filter_pretrained_cfg(model.pretrained_cfg, remove_source=True, remove_null=True)
# set some values at root config level
hf_config['architecture'] = pretrained_cfg.pop('architecture')
hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes)
    # NOTE these attrs are saved for informational purposes; they do not impact the model build
hf_config['num_features'] = model_config.pop('num_features', model.num_features)
global_pool_type = model_config.pop('global_pool', getattr(model, 'global_pool', None))
if isinstance(global_pool_type, str) and global_pool_type:
hf_config['global_pool'] = global_pool_type
# Save class label info
if 'labels' in model_config:
_logger.warning(
"'labels' as a config field for is deprecated. Please use 'label_names' and 'label_descriptions'."
" Renaming provided 'labels' field to 'label_names'.")
model_config.setdefault('label_names', model_config.pop('labels'))
label_names = model_config.pop('label_names', None)
if label_names:
assert isinstance(label_names, (dict, list, tuple))
# map label id (classifier index) -> unique label name (ie synset for ImageNet, MID for OpenImages)
# can be a dict id: name if there are id gaps, or tuple/list if no gaps.
hf_config['label_names'] = label_names
label_descriptions = model_config.pop('label_descriptions', None)
if label_descriptions:
assert isinstance(label_descriptions, dict)
# maps label names -> descriptions
hf_config['label_descriptions'] = label_descriptions
if model_args:
hf_config['model_args'] = model_args
hf_config['pretrained_cfg'] = pretrained_cfg
hf_config.update(model_config)
with config_path.open('w') as f:
json.dump(hf_config, f, indent=2)
def save_for_hf(
model: torch.nn.Module,
save_directory: str,
model_config: Optional[dict] = None,
model_args: Optional[dict] = None,
safe_serialization: Union[bool, Literal["both"]] = False,
):
assert has_hf_hub(True)
save_directory = Path(save_directory)
save_directory.mkdir(exist_ok=True, parents=True)
# Save model weights, either safely (using safetensors), or using legacy pytorch approach or both.
tensors = model.state_dict()
if safe_serialization is True or safe_serialization == "both":
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
safetensors.torch.save_file(tensors, save_directory / HF_SAFE_WEIGHTS_NAME)
if safe_serialization is False or safe_serialization == "both":
torch.save(tensors, save_directory / HF_WEIGHTS_NAME)
config_path = save_directory / 'config.json'
save_config_for_hf(
model,
config_path,
model_config=model_config,
model_args=model_args,
)
def push_to_hf_hub(
model: torch.nn.Module,
repo_id: str,
commit_message: str = 'Add model',
token: Optional[str] = None,
revision: Optional[str] = None,
private: bool = False,
create_pr: bool = False,
model_config: Optional[dict] = None,
model_card: Optional[dict] = None,
model_args: Optional[dict] = None,
safe_serialization: Union[bool, Literal["both"]] = 'both',
):
"""
Arguments:
(...)
        safe_serialization (`bool` or `"both"`, *optional*, defaults to `'both'`):
Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
Can be set to `"both"` in order to push both safe and unsafe weights.
"""
# Create repo if it doesn't exist yet
repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True)
# Infer complete repo_id from repo_url
# Can be different from the input `repo_id` if repo_owner was implicit
_, repo_owner, repo_name = repo_type_and_id_from_hf_id(repo_url)
repo_id = f"{repo_owner}/{repo_name}"
# Check if README file already exist in repo
try:
get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename="README.md", revision=revision))
has_readme = True
except EntryNotFoundError:
has_readme = False
# Dump model and push to Hub
with TemporaryDirectory() as tmpdir:
# Save model weights and config.
save_for_hf(
model,
tmpdir,
model_config=model_config,
model_args=model_args,
safe_serialization=safe_serialization,
)
# Add readme if it does not exist
if not has_readme:
model_card = model_card or {}
model_name = repo_id.split('/')[-1]
readme_path = Path(tmpdir) / "README.md"
readme_text = generate_readme(model_card, model_name)
readme_path.write_text(readme_text)
# Upload model and return
return upload_folder(
repo_id=repo_id,
folder_path=tmpdir,
revision=revision,
create_pr=create_pr,
commit_message=commit_message,
)
def generate_readme(model_card: dict, model_name: str):
tags = model_card.get('tags', None) or ['image-classification', 'timm']
readme_text = "---\n"
if tags:
readme_text += "tags:\n"
for t in tags:
readme_text += f"- {t}\n"
readme_text += f"library_name: {model_card.get('library_name', 'timm')}\n"
readme_text += f"license: {model_card.get('license', 'apache-2.0')}\n"
if 'license_name' in model_card:
readme_text += f"license_name: {model_card.get('license_name')}\n"
if 'license_link' in model_card:
readme_text += f"license_link: {model_card.get('license_link')}\n"
if 'details' in model_card and 'Dataset' in model_card['details']:
readme_text += 'datasets:\n'
if isinstance(model_card['details']['Dataset'], (tuple, list)):
for d in model_card['details']['Dataset']:
readme_text += f"- {d.lower()}\n"
else:
readme_text += f"- {model_card['details']['Dataset'].lower()}\n"
if 'Pretrain Dataset' in model_card['details']:
if isinstance(model_card['details']['Pretrain Dataset'], (tuple, list)):
for d in model_card['details']['Pretrain Dataset']:
readme_text += f"- {d.lower()}\n"
else:
readme_text += f"- {model_card['details']['Pretrain Dataset'].lower()}\n"
readme_text += "---\n"
readme_text += f"# Model card for {model_name}\n"
if 'description' in model_card:
readme_text += f"\n{model_card['description']}\n"
if 'details' in model_card:
readme_text += f"\n## Model Details\n"
for k, v in model_card['details'].items():
if isinstance(v, (list, tuple)):
readme_text += f"- **{k}:**\n"
for vi in v:
readme_text += f" - {vi}\n"
elif isinstance(v, dict):
readme_text += f"- **{k}:**\n"
for ki, vi in v.items():
readme_text += f" - {ki}: {vi}\n"
else:
readme_text += f"- **{k}:** {v}\n"
if 'usage' in model_card:
readme_text += f"\n## Model Usage\n"
readme_text += model_card['usage']
readme_text += '\n'
if 'comparison' in model_card:
readme_text += f"\n## Model Comparison\n"
readme_text += model_card['comparison']
readme_text += '\n'
if 'citation' in model_card:
readme_text += f"\n## Citation\n"
if not isinstance(model_card['citation'], (list, tuple)):
citations = [model_card['citation']]
else:
citations = model_card['citation']
for c in citations:
readme_text += f"```bibtex\n{c}\n```\n"
return readme_text
def _get_safe_alternatives(filename: str) -> Iterable[str]:
"""Returns potential safetensors alternatives for a given filename.
Use case:
When downloading a model from the Huggingface Hub, we first look if a .safetensors file exists and if yes, we use it.
Main use case is filename "pytorch_model.bin" => check for "model.safetensors" or "pytorch_model.safetensors".
"""
if filename == HF_WEIGHTS_NAME:
yield HF_SAFE_WEIGHTS_NAME
if filename == HF_OPEN_CLIP_WEIGHTS_NAME:
yield HF_OPEN_CLIP_SAFE_WEIGHTS_NAME
if filename not in (HF_WEIGHTS_NAME, HF_OPEN_CLIP_WEIGHTS_NAME) and filename.endswith(".bin"):
yield filename[:-4] + ".safetensors"
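def _safe_alternatives_example():
    # Illustrative sketch (not part of the original file): candidate safetensors filenames
    # that are checked on the Hub before falling back to the pickle checkpoint.
    assert list(_get_safe_alternatives('pytorch_model.bin')) == ['model.safetensors']
    assert list(_get_safe_alternatives('some_weights.bin')) == ['some_weights.safetensors']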
| pytorch-image-models/timm/models/_hub.py/0 | {
"file_path": "pytorch-image-models/timm/models/_hub.py",
"repo_id": "pytorch-image-models",
"token_count": 7750
} |
""" ConvMixer
"""
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d
from ._registry import register_model, generate_default_cfgs
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
__all__ = ['ConvMixer']
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class ConvMixer(nn.Module):
def __init__(
self,
dim,
depth,
kernel_size=9,
patch_size=7,
in_chans=3,
num_classes=1000,
global_pool='avg',
drop_rate=0.,
act_layer=nn.GELU,
**kwargs,
):
super().__init__()
self.num_classes = num_classes
self.num_features = self.head_hidden_size = dim
self.grad_checkpointing = False
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
act_layer(),
nn.BatchNorm2d(dim)
)
self.blocks = nn.Sequential(
*[nn.Sequential(
Residual(nn.Sequential(
nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
act_layer(),
nn.BatchNorm2d(dim)
)),
nn.Conv2d(dim, dim, kernel_size=1),
act_layer(),
nn.BatchNorm2d(dim)
) for i in range(depth)]
)
self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem=r'^stem', blocks=r'^blocks\.(\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.pooling(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_convmixer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for ConvMixer models.')
return build_model_with_cfg(ConvMixer, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .96, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head',
'first_conv': 'stem.0',
**kwargs
}
default_cfgs = generate_default_cfgs({
'convmixer_1536_20.in1k': _cfg(hf_hub_id='timm/'),
'convmixer_768_32.in1k': _cfg(hf_hub_id='timm/'),
'convmixer_1024_20_ks9_p14.in1k': _cfg(hf_hub_id='timm/')
})
@register_model
def convmixer_1536_20(pretrained=False, **kwargs) -> ConvMixer:
model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs)
return _create_convmixer('convmixer_1536_20', pretrained, **model_args)
@register_model
def convmixer_768_32(pretrained=False, **kwargs) -> ConvMixer:
model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, act_layer=nn.ReLU, **kwargs)
return _create_convmixer('convmixer_768_32', pretrained, **model_args)
@register_model
def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs) -> ConvMixer:
model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs)
return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args) | pytorch-image-models/timm/models/convmixer.py/0 | {
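def _convmixer_example():
    # Illustrative sketch (not part of the original file): build a randomly initialized
    # ConvMixer and run a dummy forward pass.
    model = convmixer_768_32(pretrained=False)
    x = torch.randn(1, 3, 224, 224)
    return model(x).shape  # torch.Size([1, 1000])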
"file_path": "pytorch-image-models/timm/models/convmixer.py",
"repo_id": "pytorch-image-models",
"token_count": 2255
} |
""" Inception-V3
Originally from torchvision Inception3 model
Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE
"""
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import trunc_normal_, create_classifier, Linear, ConvNormAct
from ._builder import build_model_with_cfg
from ._builder import resolve_pretrained_cfg
from ._manipulate import flatten_modules
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['InceptionV3'] # model_registry will add each entrypoint fn to this
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features, conv_block=None):
super(InceptionA, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionB, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)
def _forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7, conv_block=None):
super(InceptionC, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionD, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
def _forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels, conv_block=None):
super(InceptionE, self).__init__()
conv_block = conv_block or ConvNormAct
self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x):
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes, conv_block=None):
super(InceptionAux, self).__init__()
conv_block = conv_block or ConvNormAct
self.conv0 = conv_block(in_channels, 128, kernel_size=1)
self.conv1 = conv_block(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = Linear(768, num_classes)
self.fc.stddev = 0.001
def forward(self, x):
# N x 768 x 17 x 17
x = F.avg_pool2d(x, kernel_size=5, stride=3)
# N x 768 x 5 x 5
x = self.conv0(x)
# N x 128 x 5 x 5
x = self.conv1(x)
# N x 768 x 1 x 1
# Adaptive average pooling
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 768 x 1 x 1
x = torch.flatten(x, 1)
# N x 768
x = self.fc(x)
# N x 1000
return x
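# --- Hedged shape sketch (added illustration, not part of the original timm file) ---
# Checks the end-to-end shape implied by the annotations in InceptionAux.forward above,
# using a padding=0 ConvNormAct block to mirror the one InceptionV3 builds; the helper
# name and batch size are assumptions for illustration only.
def _example_inception_aux_shapes():
    import torch
    from functools import partial
    aux = InceptionAux(in_channels=768, num_classes=1000, conv_block=partial(ConvNormAct, padding=0))
    x = torch.randn(2, 768, 17, 17)     # N x 768 x 17 x 17, as annotated above
    out = aux(x)
    assert out.shape == (2, 1000)       # N x num_classes
    return out.shape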
class InceptionV3(nn.Module):
"""Inception-V3
"""
aux_logits: torch.jit.Final[bool]
def __init__(
self,
num_classes=1000,
in_chans=3,
drop_rate=0.,
global_pool='avg',
aux_logits=False,
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionV3, self).__init__()
self.num_classes = num_classes
self.aux_logits = aux_logits
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Mixed_5b = InceptionA(192, pool_features=32, conv_block=conv_block)
self.Mixed_5c = InceptionA(256, pool_features=64, conv_block=conv_block)
self.Mixed_5d = InceptionA(288, pool_features=64, conv_block=conv_block)
self.Mixed_6a = InceptionB(288, conv_block=conv_block)
self.Mixed_6b = InceptionC(768, channels_7x7=128, conv_block=conv_block)
self.Mixed_6c = InceptionC(768, channels_7x7=160, conv_block=conv_block)
self.Mixed_6d = InceptionC(768, channels_7x7=160, conv_block=conv_block)
self.Mixed_6e = InceptionC(768, channels_7x7=192, conv_block=conv_block)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes, conv_block=conv_block)
else:
self.AuxLogits = None
self.Mixed_7a = InceptionD(768, conv_block=conv_block)
self.Mixed_7b = InceptionE(1280, conv_block=conv_block)
self.Mixed_7c = InceptionE(2048, conv_block=conv_block)
self.feature_info = [
dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'),
dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'),
dict(num_chs=288, reduction=8, module='Mixed_5d'),
dict(num_chs=768, reduction=16, module='Mixed_6e'),
dict(num_chs=2048, reduction=32, module='Mixed_7c'),
]
self.num_features = self.head_hidden_size = 2048
self.global_pool, self.head_drop, self.fc = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
trunc_normal_(m.weight, std=stddev)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))}
module_map.pop(('fc',))
def _matcher(name):
if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]):
return 0
elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]):
return 1
else:
for k in module_map.keys():
if k == tuple(name.split('.')[:len(k)]):
return module_map[k]
return float('inf')
return _matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.fc
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_preaux(self, x):
x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147
x = self.Pool1(x) # N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71
x = self.Pool2(x) # N x 192 x 35 x 35
x = self.Mixed_5b(x) # N x 256 x 35 x 35
x = self.Mixed_5c(x) # N x 288 x 35 x 35
x = self.Mixed_5d(x) # N x 288 x 35 x 35
x = self.Mixed_6a(x) # N x 768 x 17 x 17
x = self.Mixed_6b(x) # N x 768 x 17 x 17
x = self.Mixed_6c(x) # N x 768 x 17 x 17
x = self.Mixed_6d(x) # N x 768 x 17 x 17
x = self.Mixed_6e(x) # N x 768 x 17 x 17
return x
def forward_postaux(self, x):
x = self.Mixed_7a(x) # N x 1280 x 8 x 8
x = self.Mixed_7b(x) # N x 2048 x 8 x 8
x = self.Mixed_7c(x) # N x 2048 x 8 x 8
return x
def forward_features(self, x):
x = self.forward_preaux(x)
if self.aux_logits:
aux = self.AuxLogits(x)
x = self.forward_postaux(x)
return x, aux
x = self.forward_postaux(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
if pre_logits:
return x
x = self.fc(x)
return x
def forward(self, x):
if self.aux_logits:
x, aux = self.forward_features(x)
x = self.forward_head(x)
return x, aux
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_inception_v3(variant, pretrained=False, **kwargs):
pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None))
aux_logits = kwargs.get('aux_logits', False)
has_aux_logits = False
if pretrained_cfg:
# only torchvision pretrained weights have aux logits
has_aux_logits = pretrained_cfg.tag == 'tv_in1k'
if aux_logits:
assert not kwargs.pop('features_only', False)
load_strict = has_aux_logits
else:
load_strict = not has_aux_logits
return build_model_with_cfg(
InceptionV3,
variant,
pretrained,
pretrained_cfg=pretrained_cfg,
pretrained_strict=load_strict,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# original PyTorch weights, ported from Tensorflow but modified
'inception_v3.tv_in1k': _cfg(
# NOTE checkpoint has aux logit layer weights
hf_hub_id='timm/',
url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'),
# my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz)
'inception_v3.tf_in1k': _cfg(hf_hub_id='timm/'),
# my port of Tensorflow adversarially trained Inception V3 from
# http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz
'inception_v3.tf_adv_in1k': _cfg(hf_hub_id='timm/'),
# from gluon pretrained models, best performing in terms of accuracy/loss metrics
# https://gluon-cv.mxnet.io/model_zoo/classification.html
'inception_v3.gluon_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults
std=IMAGENET_DEFAULT_STD, # also works well with inception defaults
)
})
@register_model
def inception_v3(pretrained=False, **kwargs) -> InceptionV3:
model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs)
return model
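# --- Hedged usage sketch (added illustration, not part of the original timm file) ---
# With aux_logits=True, InceptionV3.forward above returns a (logits, aux) tuple; the helper
# name and the 299x299 input size (per the default_cfg) are assumptions for illustration only.
def _example_inception_v3_aux():
    import torch
    model = inception_v3(pretrained=False, aux_logits=True)
    x = torch.randn(1, 3, 299, 299)
    logits, aux = model(x)
    assert logits.shape == (1, 1000) and aux.shape == (1, 1000)
    return logits.shape, aux.shape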
register_model_deprecations(__name__, {
'tf_inception_v3': 'inception_v3.tf_in1k',
'adv_inception_v3': 'inception_v3.tf_adv_in1k',
'gluon_inception_v3': 'inception_v3.gluon_in1k',
}) | pytorch-image-models/timm/models/inception_v3.py/0 | {
"file_path": "pytorch-image-models/timm/models/inception_v3.py",
"repo_id": "pytorch-image-models",
"token_count": 8642
} |
"""
pnasnet5large implementation grabbed from Cadene's pretrained models
Additional credit to https://github.com/creafz
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py
"""
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['PNASNet5Large']
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''):
super(SeparableConv2d, self).__init__()
self.depthwise_conv2d = create_conv2d(
in_channels, in_channels, kernel_size=kernel_size,
stride=stride, padding=padding, groups=in_channels)
self.pointwise_conv2d = create_conv2d(
in_channels, out_channels, kernel_size=1, padding=padding)
def forward(self, x):
x = self.depthwise_conv2d(x)
x = self.pointwise_conv2d(x)
return x
class BranchSeparables(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''):
super(BranchSeparables, self).__init__()
middle_channels = out_channels if stem_cell else in_channels
self.act_1 = nn.ReLU()
self.separable_1 = SeparableConv2d(
in_channels, middle_channels, kernel_size, stride=stride, padding=padding)
self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001)
self.act_2 = nn.ReLU()
self.separable_2 = SeparableConv2d(
middle_channels, out_channels, kernel_size, stride=1, padding=padding)
self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.act_1(x)
x = self.separable_1(x)
x = self.bn_sep_1(x)
x = self.act_2(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class ActConvBn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''):
super(ActConvBn, self).__init__()
self.act = nn.ReLU()
self.conv = create_conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.act(x)
x = self.conv(x)
x = self.bn(x)
return x
class FactorizedReduction(nn.Module):
def __init__(self, in_channels, out_channels, padding=''):
super(FactorizedReduction, self).__init__()
self.act = nn.ReLU()
self.path_1 = nn.Sequential(OrderedDict([
('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
]))
self.path_2 = nn.Sequential(OrderedDict([
('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift
('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
]))
self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.act(x)
x_path1 = self.path_1(x)
x_path2 = self.path_2(x)
out = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
return out
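# --- Hedged shape sketch (added illustration, not part of the original timm file) ---
# FactorizedReduction halves the spatial resolution with two parallel stride-2 paths: path_2
# crops one leading row/column and zero-pads one trailing one, so its pooling samples a pixel
# grid offset from path_1 before the two halves are concatenated. Sizes below are assumptions.
def _example_factorized_reduction():
    import torch
    fr = FactorizedReduction(in_channels=96, out_channels=108)
    x = torch.randn(2, 96, 28, 28)
    out = fr(x)
    assert out.shape == (2, 108, 14, 14)    # spatial dims halved, channels = out_channels
    return out.shape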
class CellBase(nn.Module):
def cell_forward(self, x_left, x_right):
x_comb_iter_0_left = self.comb_iter_0_left(x_left)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_right)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2)
x_comb_iter_3_right = self.comb_iter_3_right(x_right)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_left)
if self.comb_iter_4_right is not None:
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
else:
x_comb_iter_4_right = x_right
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class CellStem0(CellBase):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(CellStem0, self).__init__()
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(
in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type)
self.comb_iter_0_right = nn.Sequential(OrderedDict([
('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)),
('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)),
('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)),
]))
self.comb_iter_1_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type)
self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type)
self.comb_iter_2_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type)
self.comb_iter_3_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=3, padding=pad_type)
self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(
in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type)
self.comb_iter_4_right = ActConvBn(
out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type)
def forward(self, x_left):
x_right = self.conv_1x1(x_left)
x_out = self.cell_forward(x_left, x_right)
return x_out
class Cell(CellBase):
def __init__(
self,
in_chs_left,
out_chs_left,
in_chs_right,
out_chs_right,
pad_type='',
is_reduction=False,
match_prev_layer_dims=False,
):
super(Cell, self).__init__()
# If `is_reduction` is set to `True` stride 2 is used for
# convolution and pooling layers to reduce the spatial size of
# the output of a cell approximately by a factor of 2.
stride = 2 if is_reduction else 1
        # If `match_prev_layer_dims` is set to `True`
# `FactorizedReduction` is used to reduce the spatial size
# of the left input of a cell approximately by a factor of 2.
self.match_prev_layer_dimensions = match_prev_layer_dims
if match_prev_layer_dims:
self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type)
else:
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(
out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type)
self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
self.comb_iter_1_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type)
self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
self.comb_iter_2_left = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(
out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type)
self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3)
self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(
out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type)
if is_reduction:
self.comb_iter_4_right = ActConvBn(
out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type)
else:
self.comb_iter_4_right = None
def forward(self, x_left, x_right):
x_left = self.conv_prev_1x1(x_left)
x_right = self.conv_1x1(x_right)
x_out = self.cell_forward(x_left, x_right)
return x_out
class PNASNet5Large(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
output_stride=32,
drop_rate=0.,
global_pool='avg',
pad_type='',
):
super(PNASNet5Large, self).__init__()
self.num_classes = num_classes
self.num_features = self.head_hidden_size = 4320
assert output_stride == 32
self.conv_0 = ConvNormAct(
in_chans, 96, kernel_size=3, stride=2, padding=0,
norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False)
self.cell_stem_0 = CellStem0(
in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type)
self.cell_stem_1 = Cell(
in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type,
match_prev_layer_dims=True, is_reduction=True)
self.cell_0 = Cell(
in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type,
match_prev_layer_dims=True)
self.cell_1 = Cell(
in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
self.cell_2 = Cell(
in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
self.cell_3 = Cell(
in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
self.cell_4 = Cell(
in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type,
is_reduction=True)
self.cell_5 = Cell(
in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type,
match_prev_layer_dims=True)
self.cell_6 = Cell(
in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)
self.cell_7 = Cell(
in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)
self.cell_8 = Cell(
in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type,
is_reduction=True)
self.cell_9 = Cell(
in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type,
match_prev_layer_dims=True)
self.cell_10 = Cell(
in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)
self.cell_11 = Cell(
in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)
self.act = nn.ReLU()
self.feature_info = [
dict(num_chs=96, reduction=2, module='conv_0'),
dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'),
dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'),
dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'),
dict(num_chs=4320, reduction=32, module='act'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(stem=r'^conv_0|cell_stem_[01]', blocks=r'^cell_(\d+)')
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.last_linear
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x_conv_0 = self.conv_0(x)
x_stem_0 = self.cell_stem_0(x_conv_0)
x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0)
x_cell_0 = self.cell_0(x_stem_0, x_stem_1)
x_cell_1 = self.cell_1(x_stem_1, x_cell_0)
x_cell_2 = self.cell_2(x_cell_0, x_cell_1)
x_cell_3 = self.cell_3(x_cell_1, x_cell_2)
x_cell_4 = self.cell_4(x_cell_2, x_cell_3)
x_cell_5 = self.cell_5(x_cell_3, x_cell_4)
x_cell_6 = self.cell_6(x_cell_4, x_cell_5)
x_cell_7 = self.cell_7(x_cell_5, x_cell_6)
x_cell_8 = self.cell_8(x_cell_6, x_cell_7)
x_cell_9 = self.cell_9(x_cell_7, x_cell_8)
x_cell_10 = self.cell_10(x_cell_8, x_cell_9)
x_cell_11 = self.cell_11(x_cell_9, x_cell_10)
x = self.act(x_cell_11)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_pnasnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
PNASNet5Large,
variant,
pretrained,
feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model
**kwargs,
)
default_cfgs = generate_default_cfgs({
'pnasnet5large.tf_in1k': {
'hf_hub_id': 'timm/',
'input_size': (3, 331, 331),
'pool_size': (11, 11),
'crop_pct': 0.911,
'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5),
'std': (0.5, 0.5, 0.5),
'num_classes': 1000,
'first_conv': 'conv_0.conv',
'classifier': 'last_linear',
},
})
@register_model
def pnasnet5large(pretrained=False, **kwargs) -> PNASNet5Large:
r"""PNASNet-5 model architecture from the
`"Progressive Neural Architecture Search"
<https://arxiv.org/abs/1712.00559>`_ paper.
"""
model_kwargs = dict(pad_type='same', **kwargs)
return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs)
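# --- Hedged usage sketch (added illustration, not part of the original timm file) ---
# The default_cfg above expects 331x331 inputs; forward_features should yield the
# 4320-channel map listed last in feature_info. Helper name and batch size are assumptions.
def _example_pnasnet_features():
    import torch
    model = pnasnet5large(pretrained=False, num_classes=0)   # num_classes=0 -> no classifier
    x = torch.randn(1, 3, 331, 331)
    feats = model.forward_features(x)
    assert feats.shape[1] == 4320                            # num_features of PNASNet5Large
    return feats.shape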
| pytorch-image-models/timm/models/pnasnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/pnasnet.py",
"repo_id": "pytorch-image-models",
"token_count": 7672
} |
""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
S3 (AutoFormerV2, https://arxiv.org/abs/2111.14725) Swin weights from
- https://github.com/microsoft/Cream/tree/main/AutoFormerV2
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import logging
import math
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, ClassifierHead, to_2tuple, to_ntuple, trunc_normal_, \
_assert, use_fused_attn, resize_rel_pos_bias_table, resample_patch_embed, ndgrid
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq, named_apply
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
from .vision_transformer import get_init_weights_vit
__all__ = ['SwinTransformer'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
_int_or_tuple_2_t = Union[int, Tuple[int, int]]
def window_partition(
x: torch.Tensor,
window_size: Tuple[int, int],
) -> torch.Tensor:
"""
    Partition into non-overlapping windows. H and W are assumed to be multiples of the
    window size; any padding required for indivisible resolutions is applied by the caller
    before partitioning.
    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (Tuple[int, int]): window size.
    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: Tuple[int, int], H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
        window_size (Tuple[int, int]): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
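# --- Hedged round-trip sketch (added illustration, not part of the original timm file) ---
# When H and W are already multiples of the window size, window_reverse(window_partition(x))
# reproduces x exactly; the sizes below (14x14 tokens, 7x7 windows) are illustrative assumptions.
def _example_window_roundtrip():
    import torch
    x = torch.randn(2, 14, 14, 96)                       # B, H, W, C
    windows = window_partition(x, (7, 7))                # 2 images * 2 * 2 windows = 8 windows
    restored = window_reverse(windows, (7, 7), 14, 14)
    assert windows.shape == (8, 7, 7, 96)
    assert torch.equal(restored, x)
    return windows.shape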
def get_relative_position_index(win_h: int, win_w: int):
# get pair-wise relative position index for each token inside the window
coords = torch.stack(ndgrid(torch.arange(win_h), torch.arange(win_w))) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += win_h - 1 # shift to start from 0
relative_coords[:, :, 1] += win_w - 1
relative_coords[:, :, 0] *= 2 * win_w - 1
return relative_coords.sum(-1) # Wh*Ww, Wh*Ww
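# --- Hedged worked example (added illustration, not part of the original timm file) ---
# For a 2x2 window there are 4 tokens; each pairwise offset (dy, dx) lies in {-1, 0, 1},
# giving (2*2-1) * (2*2-1) = 9 possible offsets, so indices fall in [0, 9). The diagonal
# (a token paired with itself) always maps to the same zero-offset index.
def _example_relative_position_index():
    idx = get_relative_position_index(2, 2)
    assert idx.shape == (4, 4)
    assert int(idx.max()) <= 8                                            # at most 9 distinct offsets
    assert int(idx[0, 0]) == int(idx[1, 1]) == int(idx[2, 2]) == int(idx[3, 3])  # zero offset
    return idx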
class WindowAttention(nn.Module):
""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports shifted and non-shifted windows.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int,
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
qkv_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
):
"""
Args:
dim: Number of input channels.
num_heads: Number of attention heads.
head_dim: Number of channels per head (dim // num_heads if not set)
window_size: The height and width of the window.
qkv_bias: If True, add a learnable bias to query, key, value.
attn_drop: Dropout ratio of attention weight.
proj_drop: Dropout ratio of output.
"""
super().__init__()
self.dim = dim
self.window_size = to_2tuple(window_size) # Wh, Ww
win_h, win_w = self.window_size
self.window_area = win_h * win_w
self.num_heads = num_heads
head_dim = head_dim or dim // num_heads
attn_dim = head_dim * num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn(experimental=True) # NOTE not tested for prime-time yet
# define a parameter table of relative position bias, shape: 2*Wh-1 * 2*Ww-1, nH
self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * win_h - 1) * (2 * win_w - 1), num_heads))
# get pair-wise relative position index for each token inside the window
self.register_buffer("relative_position_index", get_relative_position_index(win_h, win_w), persistent=False)
self.qkv = nn.Linear(dim, attn_dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(attn_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def set_window_size(self, window_size: Tuple[int, int]) -> None:
"""Update window size & interpolate position embeddings
Args:
            window_size (Tuple[int, int]): New window size
"""
window_size = to_2tuple(window_size)
if window_size == self.window_size:
return
self.window_size = window_size
win_h, win_w = self.window_size
self.window_area = win_h * win_w
with torch.no_grad():
new_bias_shape = (2 * win_h - 1) * (2 * win_w - 1), self.num_heads
self.relative_position_bias_table = nn.Parameter(
resize_rel_pos_bias_table(
self.relative_position_bias_table,
new_window_size=self.window_size,
new_bias_shape=new_bias_shape,
))
self.register_buffer("relative_position_index", get_relative_position_index(win_h, win_w), persistent=False)
def _get_rel_pos_bias(self) -> torch.Tensor:
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(self.window_area, self.window_area, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
return relative_position_bias.unsqueeze(0)
def forward(self, x, mask: Optional[torch.Tensor] = None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
attn_mask = self._get_rel_pos_bias()
if mask is not None:
num_win = mask.shape[0]
mask = mask.view(1, num_win, 1, N, N).expand(B_ // num_win, -1, self.num_heads, -1, -1)
attn_mask = attn_mask + mask.reshape(-1, self.num_heads, N, N)
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn + self._get_rel_pos_bias()
if mask is not None:
num_win = mask.shape[0]
attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B_, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock(nn.Module):
""" Swin Transformer Block.
"""
def __init__(
self,
dim: int,
input_resolution: _int_or_tuple_2_t,
num_heads: int = 4,
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
shift_size: int = 0,
always_partition: bool = False,
dynamic_mask: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
):
"""
Args:
dim: Number of input channels.
input_resolution: Input resolution.
window_size: Window size.
num_heads: Number of attention heads.
head_dim: Enforce the number of channels per head
shift_size: Shift size for SW-MSA.
always_partition: Always partition into full windows and shift
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Dropout rate.
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
act_layer: Activation layer.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.target_shift_size = to_2tuple(shift_size) # store for later resize
self.always_partition = always_partition
self.dynamic_mask = dynamic_mask
self.window_size, self.shift_size = self._calc_window_shift(window_size, shift_size)
self.window_area = self.window_size[0] * self.window_size[1]
self.mlp_ratio = mlp_ratio
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
num_heads=num_heads,
head_dim=head_dim,
window_size=self.window_size,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.register_buffer(
"attn_mask",
None if self.dynamic_mask else self.get_attn_mask(),
persistent=False,
)
def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]:
if any(self.shift_size):
# calculate attention mask for SW-MSA
if x is not None:
H, W = x.shape[1], x.shape[2]
device = x.device
dtype = x.dtype
else:
H, W = self.input_resolution
device = None
dtype = None
H = math.ceil(H / self.window_size[0]) * self.window_size[0]
W = math.ceil(W / self.window_size[1]) * self.window_size[1]
img_mask = torch.zeros((1, H, W, 1), dtype=dtype, device=device) # 1 H W 1
cnt = 0
for h in (
(0, -self.window_size[0]),
(-self.window_size[0], -self.shift_size[0]),
(-self.shift_size[0], None),
):
for w in (
(0, -self.window_size[1]),
(-self.window_size[1], -self.shift_size[1]),
(-self.shift_size[1], None),
):
img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_area)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
return attn_mask
def _calc_window_shift(
self,
target_window_size: Union[int, Tuple[int, int]],
target_shift_size: Optional[Union[int, Tuple[int, int]]] = None,
) -> Tuple[Tuple[int, int], Tuple[int, int]]:
target_window_size = to_2tuple(target_window_size)
if target_shift_size is None:
# if passed value is None, recalculate from default window_size // 2 if it was previously non-zero
target_shift_size = self.target_shift_size
if any(target_shift_size):
target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2)
else:
target_shift_size = to_2tuple(target_shift_size)
if self.always_partition:
return target_window_size, target_shift_size
window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return tuple(window_size), tuple(shift_size)
def set_input_size(
self,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
always_partition: Optional[bool] = None,
):
"""
Args:
feat_size: New input resolution
window_size: New window size
always_partition: Change always_partition attribute if not None
"""
self.input_resolution = feat_size
if always_partition is not None:
self.always_partition = always_partition
self.window_size, self.shift_size = self._calc_window_shift(window_size)
self.window_area = self.window_size[0] * self.window_size[1]
self.attn.set_window_size(self.window_size)
self.register_buffer(
"attn_mask",
None if self.dynamic_mask else self.get_attn_mask(),
persistent=False,
)
def _attn(self, x):
B, H, W, C = x.shape
# cyclic shift
has_shift = any(self.shift_size)
if has_shift:
shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2))
else:
shifted_x = x
# pad for resolution not divisible by window size
pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h))
_, Hp, Wp, _ = shifted_x.shape
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
if getattr(self, 'dynamic_mask', False):
attn_mask = self.get_attn_mask(shifted_x)
else:
attn_mask = self.attn_mask
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
shifted_x = shifted_x[:, :H, :W, :].contiguous()
# reverse cyclic shift
if has_shift:
x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2))
else:
x = shifted_x
return x
def forward(self, x):
B, H, W, C = x.shape
x = x + self.drop_path1(self._attn(self.norm1(x)))
x = x.reshape(B, -1, C)
x = x + self.drop_path2(self.mlp(self.norm2(x)))
x = x.reshape(B, H, W, C)
return x
class PatchMerging(nn.Module):
""" Patch Merging Layer.
"""
def __init__(
self,
dim: int,
out_dim: Optional[int] = None,
norm_layer: Callable = nn.LayerNorm,
):
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels (or 2 * dim if None)
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.out_dim = out_dim or 2 * dim
self.norm = norm_layer(4 * dim)
self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False)
def forward(self, x):
B, H, W, C = x.shape
pad_values = (0, 0, 0, W % 2, 0, H % 2)
x = nn.functional.pad(x, pad_values)
_, H, W, _ = x.shape
x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
x = self.norm(x)
x = self.reduction(x)
return x
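# --- Hedged shape sketch (added illustration, not part of the original timm file) ---
# PatchMerging groups each 2x2 neighbourhood of tokens into one token, so the spatial dims
# halve while channels go from dim to out_dim (2 * dim by default). Sizes below are assumptions.
def _example_patch_merging():
    import torch
    pm = PatchMerging(dim=96)                # out_dim defaults to 2 * 96 = 192
    x = torch.randn(2, 56, 56, 96)           # NHWC, the layout used throughout this file
    out = pm(x)
    assert out.shape == (2, 28, 28, 192)
    return out.shape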
class SwinTransformerStage(nn.Module):
""" A basic Swin Transformer layer for one stage.
"""
def __init__(
self,
dim: int,
out_dim: int,
input_resolution: Tuple[int, int],
depth: int,
downsample: bool = True,
num_heads: int = 4,
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
always_partition: bool = False,
dynamic_mask: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: Union[List[float], float] = 0.,
norm_layer: Callable = nn.LayerNorm,
):
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels.
input_resolution: Input resolution.
depth: Number of blocks.
downsample: Downsample layer at the end of the layer.
num_heads: Number of attention heads.
head_dim: Channels per head (dim // num_heads if not set)
window_size: Local window size.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Projection dropout rate.
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
norm_layer: Normalization layer.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution
self.depth = depth
self.grad_checkpointing = False
window_size = to_2tuple(window_size)
shift_size = tuple([w // 2 for w in window_size])
# patch merging layer
if downsample:
self.downsample = PatchMerging(
dim=dim,
out_dim=out_dim,
norm_layer=norm_layer,
)
else:
assert dim == out_dim
self.downsample = nn.Identity()
# build blocks
self.blocks = nn.Sequential(*[
SwinTransformerBlock(
dim=out_dim,
input_resolution=self.output_resolution,
num_heads=num_heads,
head_dim=head_dim,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else shift_size,
always_partition=always_partition,
dynamic_mask=dynamic_mask,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)])
def set_input_size(
self,
feat_size: Tuple[int, int],
window_size: int,
always_partition: Optional[bool] = None,
):
""" Updates the resolution, window size and so the pair-wise relative positions.
Args:
feat_size: New input (feature) resolution
window_size: New window size
always_partition: Always partition / shift the window
"""
self.input_resolution = feat_size
if isinstance(self.downsample, nn.Identity):
self.output_resolution = feat_size
else:
self.output_resolution = tuple(i // 2 for i in feat_size)
for block in self.blocks:
block.set_input_size(
feat_size=self.output_resolution,
window_size=window_size,
always_partition=always_partition,
)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class SwinTransformer(nn.Module):
""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
"""
def __init__(
self,
img_size: _int_or_tuple_2_t = 224,
patch_size: int = 4,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
num_heads: Tuple[int, ...] = (3, 6, 12, 24),
head_dim: Optional[int] = None,
window_size: _int_or_tuple_2_t = 7,
always_partition: bool = False,
strict_img_size: bool = True,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.1,
embed_layer: Callable = PatchEmbed,
norm_layer: Union[str, Callable] = nn.LayerNorm,
weight_init: str = '',
**kwargs,
):
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
embed_dim: Patch embedding dimension.
depths: Depth of each Swin Transformer layer.
num_heads: Number of attention heads in different layers.
head_dim: Dimension of self-attention heads.
window_size: Window size.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
drop_rate: Dropout rate.
attn_drop_rate (float): Attention dropout rate.
drop_path_rate (float): Stochastic depth rate.
embed_layer: Patch embedding layer.
norm_layer (nn.Module): Normalization layer.
"""
super().__init__()
assert global_pool in ('', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.output_fmt = 'NHWC'
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (self.num_layers - 1))
self.feature_info = []
if not isinstance(embed_dim, (tuple, list)):
embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
# split image into non-overlapping patches
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
norm_layer=norm_layer,
strict_img_size=strict_img_size,
output_fmt='NHWC',
)
patch_grid = self.patch_embed.grid_size
# build layers
head_dim = to_ntuple(self.num_layers)(head_dim)
if not isinstance(window_size, (list, tuple)):
window_size = to_ntuple(self.num_layers)(window_size)
elif len(window_size) == 2:
window_size = (window_size,) * self.num_layers
assert len(window_size) == self.num_layers
mlp_ratio = to_ntuple(self.num_layers)(mlp_ratio)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
layers = []
in_dim = embed_dim[0]
scale = 1
for i in range(self.num_layers):
out_dim = embed_dim[i]
layers += [SwinTransformerStage(
dim=in_dim,
out_dim=out_dim,
input_resolution=(
patch_grid[0] // scale,
patch_grid[1] // scale
),
depth=depths[i],
downsample=i > 0,
num_heads=num_heads[i],
head_dim=head_dim[i],
window_size=window_size[i],
always_partition=always_partition,
dynamic_mask=not strict_img_size,
mlp_ratio=mlp_ratio[i],
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)]
in_dim = out_dim
if i > 0:
scale *= 2
self.feature_info += [dict(num_chs=out_dim, reduction=patch_size * scale, module=f'layers.{i}')]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
input_fmt=self.output_fmt,
)
if weight_init != 'skip':
self.init_weights(weight_init)
@torch.jit.ignore
def init_weights(self, mode=''):
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
named_apply(get_init_weights_vit(mode, head_bias=head_bias), self)
@torch.jit.ignore
def no_weight_decay(self):
nwd = set()
for n, _ in self.named_parameters():
if 'relative_position_bias_table' in n:
nwd.add(n)
return nwd
def set_input_size(
self,
img_size: Optional[Tuple[int, int]] = None,
patch_size: Optional[Tuple[int, int]] = None,
window_size: Optional[Tuple[int, int]] = None,
window_ratio: int = 8,
always_partition: Optional[bool] = None,
) -> None:
""" Updates the image resolution and window size.
Args:
img_size: New input resolution, if None current resolution is used
            patch_size (Optional[Tuple[int, int]]): New patch size, if None use current patch size
            window_size: New window size, if None computed from the new patch grid size // window_ratio
window_ratio: divisor for calculating window size from grid size
            always_partition: always partition into windows and shift (even if the window is larger than the feature size)
"""
if img_size is not None or patch_size is not None:
self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size)
patch_grid = self.patch_embed.grid_size
if window_size is None:
window_size = tuple([pg // window_ratio for pg in patch_grid])
for index, stage in enumerate(self.layers):
stage_scale = 2 ** max(index - 1, 0)
stage.set_input_size(
feat_size=(patch_grid[0] // stage_scale, patch_grid[1] // stage_scale),
window_size=window_size,
always_partition=always_partition,
)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^layers\.(\d+)' if coarse else [
(r'^layers\.(\d+).downsample', (0,)),
(r'^layers\.(\d+)\.\w+\.(\d+)', None),
(r'^norm', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for l in self.layers:
l.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            A list of intermediate feature tensors, or a (final_features, intermediates) tuple
            when intermediates_only is False.
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.layers), indices)
# forward pass
x = self.patch_embed(x)
num_stages = len(self.layers)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.layers
else:
stages = self.layers[:max_index + 1]
for i, stage in enumerate(stages):
x = stage(x)
if i in take_indices:
if norm and i == num_stages - 1:
                    x_inter = self.norm(x)  # apply final norm to the last intermediate
else:
x_inter = x
x_inter = x_inter.permute(0, 3, 1, 2).contiguous()
intermediates.append(x_inter)
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.layers), indices)
self.layers = self.layers[:max_index + 1] # truncate blocks
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
x = self.layers(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
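# --- Hedged usage sketch (added illustration, not part of the original timm file) ---
# Builds a Swin-T sized model directly from the class above, then rescales it to 256x256
# inputs via set_input_size, which also resizes the relative position bias tables. The
# concrete sizes and num_classes=10 are assumptions for illustration only.
def _example_swin_set_input_size():
    import torch
    model = SwinTransformer(
        img_size=224, patch_size=4, window_size=7, embed_dim=96,
        depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), num_classes=10)
    model.set_input_size(img_size=(256, 256), window_ratio=8)   # new window = 64 // 8 = 8
    x = torch.randn(1, 3, 256, 256)
    logits = model(x)
    assert logits.shape == (1, 10)
    return logits.shape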
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
old_weights = True
if 'head.fc.weight' in state_dict:
old_weights = False
import re
out_dict = {}
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
for k, v in state_dict.items():
if any([n in k for n in ('relative_position_index', 'attn_mask')]):
continue # skip buffers that should not be persistent
if 'patch_embed.proj.weight' in k:
_, _, H, W = model.patch_embed.proj.weight.shape
if v.shape[-2] != H or v.shape[-1] != W:
v = resample_patch_embed(
v,
(H, W),
interpolation='bicubic',
antialias=True,
verbose=True,
)
if k.endswith('relative_position_bias_table'):
m = model.get_submodule(k[:-29])
if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
v = resize_rel_pos_bias_table(
v,
new_window_size=m.window_size,
new_bias_shape=m.relative_position_bias_table.shape,
)
if old_weights:
k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
def _create_swin_transformer(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
SwinTransformer, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
'swin_small_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22kto1k_finetune.pth', ),
'swin_base_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',),
'swin_base_patch4_window12_384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'swin_large_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',),
'swin_large_patch4_window12_384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'swin_tiny_patch4_window7_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',),
'swin_small_patch4_window7_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',),
'swin_base_patch4_window7_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth',),
'swin_base_patch4_window12_384.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
# tiny 22k pretrain is worse than 1k, so moved after (untagged priority is based on order)
'swin_tiny_patch4_window7_224.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22kto1k_finetune.pth',),
'swin_tiny_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_small_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_base_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_base_patch4_window12_384.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841),
'swin_large_patch4_window7_224.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_large_patch4_window12_384.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841),
'swin_s3_tiny_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_t-1d53f6a8.pth'),
'swin_s3_small_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_s-3bb4c69d.pth'),
'swin_s3_base_224.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_b-a1e95db4.pth'),
})
@register_model
def swin_tiny_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-T @ 224x224, trained ImageNet-1k
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer(
'swin_tiny_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_small_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S @ 224x224
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer(
'swin_small_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_base_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-B @ 224x224
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer(
'swin_base_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_base_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-B @ 384x384
"""
model_args = dict(patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer(
'swin_base_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_large_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-L @ 224x224
"""
model_args = dict(patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48))
return _create_swin_transformer(
'swin_large_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_large_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-L @ 384x384
"""
model_args = dict(patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48))
return _create_swin_transformer(
'swin_large_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_s3_tiny_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S3-T @ 224x224, https://arxiv.org/abs/2111.14725
"""
model_args = dict(
patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer('swin_s3_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_s3_small_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S3-S @ 224x224, https://arxiv.org/abs/2111.14725
"""
model_args = dict(
patch_size=4, window_size=(14, 14, 14, 7), embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer('swin_s3_small_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swin_s3_base_224(pretrained=False, **kwargs) -> SwinTransformer:
""" Swin-S3-B @ 224x224, https://arxiv.org/abs/2111.14725
"""
model_args = dict(
patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 30, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer('swin_s3_base_224', pretrained=pretrained, **dict(model_args, **kwargs))
register_model_deprecations(__name__, {
'swin_base_patch4_window7_224_in22k': 'swin_base_patch4_window7_224.ms_in22k',
'swin_base_patch4_window12_384_in22k': 'swin_base_patch4_window12_384.ms_in22k',
'swin_large_patch4_window7_224_in22k': 'swin_large_patch4_window7_224.ms_in22k',
'swin_large_patch4_window12_384_in22k': 'swin_large_patch4_window12_384.ms_in22k',
})
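# A minimal usage sketch (illustrative, not part of the original file): the registered
# variants above are instantiated through timm's factory, e.g.
#
#   import timm
#   model = timm.create_model('swin_tiny_patch4_window7_224.ms_in1k', pretrained=True).eval()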
| pytorch-image-models/timm/models/swin_transformer.py/0 | {
"file_path": "pytorch-image-models/timm/models/swin_transformer.py",
"repo_id": "pytorch-image-models",
"token_count": 21031
} |
"""
Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch)
@author: tstandley
Adapted by cadene
Creates an Xception Model as defined in:
Francois Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/pdf/1610.02357.pdf
These weights were ported from the Keras implementation. They achieve the following performance on the validation set:
Loss:0.9173 Prec@1:78.892 Prec@5:94.292
REMEMBER to set your image size to 3x299x299 for both test and validation
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
"""
import torch.jit
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['Xception']
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False)
self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True):
super(Block, self).__init__()
if out_channels != in_channels or strides != 1:
self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False)
self.skipbn = nn.BatchNorm2d(out_channels)
else:
self.skip = None
rep = []
for i in range(reps):
if grow_first:
inc = in_channels if i == 0 else out_channels
outc = out_channels
else:
inc = in_channels
outc = in_channels if i < (reps - 1) else out_channels
rep.append(nn.ReLU(inplace=True))
rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1))
rep.append(nn.BatchNorm2d(outc))
if not start_with_relu:
rep = rep[1:]
else:
rep[0] = nn.ReLU(inplace=False)
if strides != 1:
rep.append(nn.MaxPool2d(3, strides, 1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x += skip
return x
class Xception(nn.Module):
"""
Xception optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1610.02357.pdf
"""
def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'):
""" Constructor
Args:
num_classes: number of classes
"""
super(Xception, self).__init__()
self.drop_rate = drop_rate
self.global_pool = global_pool
self.num_classes = num_classes
self.num_features = self.head_hidden_size = 2048
self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.act2 = nn.ReLU(inplace=True)
self.block1 = Block(64, 128, 2, 2, start_with_relu=False)
self.block2 = Block(128, 256, 2, 2)
self.block3 = Block(256, 728, 2, 2)
self.block4 = Block(728, 728, 3, 1)
self.block5 = Block(728, 728, 3, 1)
self.block6 = Block(728, 728, 3, 1)
self.block7 = Block(728, 728, 3, 1)
self.block8 = Block(728, 728, 3, 1)
self.block9 = Block(728, 728, 3, 1)
self.block10 = Block(728, 728, 3, 1)
self.block11 = Block(728, 728, 3, 1)
self.block12 = Block(728, 1024, 2, 2, grow_first=False)
self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
self.bn3 = nn.BatchNorm2d(1536)
self.act3 = nn.ReLU(inplace=True)
self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1)
self.bn4 = nn.BatchNorm2d(self.num_features)
self.act4 = nn.ReLU(inplace=True)
self.feature_info = [
dict(num_chs=64, reduction=2, module='act2'),
dict(num_chs=128, reduction=4, module='block2.rep.0'),
dict(num_chs=256, reduction=8, module='block3.rep.0'),
dict(num_chs=728, reduction=16, module='block12.rep.0'),
dict(num_chs=2048, reduction=32, module='act4'),
]
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
# #------- init weights --------
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^conv[12]|bn[12]',
blocks=[
(r'^block(\d+)', None),
(r'^conv[34]|bn[34]', (99,)),
],
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, "gradient checkpointing not supported"
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.fc
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.act3(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.act4(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
if self.drop_rate:
            x = F.dropout(x, self.drop_rate, training=self.training)
return x if pre_logits else self.fc(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _xception(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
Xception, variant, pretrained,
feature_cfg=dict(feature_cls='hook'),
**kwargs)
default_cfgs = generate_default_cfgs({
'legacy_xception.tf_in1k': {
'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth',
'input_size': (3, 299, 299),
'pool_size': (10, 10),
'crop_pct': 0.8975,
'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5),
'std': (0.5, 0.5, 0.5),
'num_classes': 1000,
'first_conv': 'conv1',
'classifier': 'fc'
# The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
}
})
@register_model
def legacy_xception(pretrained=False, **kwargs) -> Xception:
return _xception('legacy_xception', pretrained=pretrained, **kwargs)
register_model_deprecations(__name__, {
'xception': 'legacy_xception',
})
| pytorch-image-models/timm/models/xception.py/0 | {
"file_path": "pytorch-image-models/timm/models/xception.py",
"repo_id": "pytorch-image-models",
"token_count": 3992
} |
""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb
This optimizer code was adapted from the following (starting with latest)
* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py
* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
* https://github.com/cybertronai/pytorch-lamb
Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is
similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX.
In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU.
Original copyrights for above sources are below.
Modifications Copyright 2021 Ross Wightman
"""
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
from typing import Optional, Tuple
import torch
from torch.optim import Optimizer
from ._types import ParamsT
class Lamb(Optimizer):
"""Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB
reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
LAMB was proposed in:
- Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962
- On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ
Args:
params: Iterable of parameters to optimize or dicts defining parameter groups.
lr: Learning rate
betas: Coefficients used for computing running averages of gradient and its norm.
eps: Term added to the denominator to improve numerical stability.
weight_decay: Weight decay
grad_averaging: Whether apply (1-beta2) to grad when calculating running averages of gradient.
max_grad_norm: Value used to clip global grad norm.
trust_clip: Enable LAMBC trust ratio clipping.
        always_adapt: Always apply the layer-wise trust-ratio adaptation, even for parameters with 0.0 weight decay.
        caution: Apply the caution mask from 'Cautious Optimizers' (https://arxiv.org/abs/2411.16085).
"""
def __init__(
self,
params: ParamsT,
lr: float = 1e-3,
bias_correction: bool = True,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.01,
grad_averaging: bool = True,
max_grad_norm: Optional[float] = 1.0,
trust_clip: bool = False,
always_adapt: bool = False,
caution: bool = False,
decoupled_decay: bool = False,
):
defaults = dict(
lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
grad_averaging=grad_averaging,
max_grad_norm=max_grad_norm,
trust_clip=trust_clip,
always_adapt=always_adapt,
caution=caution,
decoupled_decay=decoupled_decay,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('caution', False)
group.setdefault('decoupled_decay', False)
def _get_clip_grad_norm(self):
max_grad_norm = self.defaults['max_grad_norm']
if max_grad_norm is None:
return None
norms = []
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
norms.append(torch.linalg.vector_norm(grad))
global_norm = torch.linalg.vector_norm(torch.stack(norms))
clip_global_norm = (global_norm / max_grad_norm).clamp_(min=1.0)
return clip_global_norm
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
clip_grad_norm = self._get_clip_grad_norm() # None if disabled
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
beta3 = 1 - beta1 if grad_averaging else 1.0
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
if bias_correction:
bias_correction1 = 1 - beta1 ** group['step']
bias_correction2 = 1 - beta2 ** group['step']
else:
bias_correction1, bias_correction2 = 1.0, 1.0
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if clip_grad_norm is not None:
grad.div_(clip_grad_norm)
state = self.state[p]
# State initialization
if len(state) == 0:
                    # Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
update = (exp_avg / bias_correction1).div_(denom)
if group['caution']:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (update * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
update.mul_(mask)
weight_decay = group['weight_decay']
if weight_decay != 0:
if group.get('decoupled_decay', False):
p.add_(p, alpha=-group['lr'] * weight_decay)
else:
update.add_(p, alpha=weight_decay)
if weight_decay != 0 or group['always_adapt']:
# Layer-wise LR adaptation. By default, skip adaptation on parameters that are
# excluded from weight decay, unless always_adapt == True, then always enabled.
w_norm = p.norm(2.0)
g_norm = update.norm(2.0)
trust_ratio = w_norm / g_norm
# FIXME nested where required since logical and/or not working in PT XLA
# Set the ratio to 1.0 (no change) if either weight norm or grad norm is zero
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, trust_ratio, 1.0),
1.0,
)
if group['trust_clip']:
# LAMBC trust clipping, upper bound fixed at one
trust_ratio = torch.clamp(trust_ratio, max=1.0)
update.mul_(trust_ratio)
p.add_(update, alpha=-group['lr'])
return loss
| pytorch-image-models/timm/optim/lamb.py/0 | {
"file_path": "pytorch-image-models/timm/optim/lamb.py",
"repo_id": "pytorch-image-models",
"token_count": 4311
} |
""" Distributed training/validation utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import os
from typing import Optional
import torch
from torch import distributed as dist
from .model import unwrap_model
_logger = logging.getLogger(__name__)
def reduce_tensor(tensor, n):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= n
return rt
def distribute_bn(model, world_size, reduce=False):
# ensure every node has the same running bn stats
for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True):
if ('running_mean' in bn_name) or ('running_var' in bn_name):
if reduce:
# average bn stats across whole group
torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM)
bn_buf /= float(world_size)
else:
# broadcast bn stats from rank 0 to whole group
torch.distributed.broadcast(bn_buf, 0)
def is_global_primary(args):
return args.rank == 0
def is_local_primary(args):
return args.local_rank == 0
def is_primary(args, local=False):
return is_local_primary(args) if local else is_global_primary(args)
def is_distributed_env():
if 'WORLD_SIZE' in os.environ:
return int(os.environ['WORLD_SIZE']) > 1
if 'SLURM_NTASKS' in os.environ:
return int(os.environ['SLURM_NTASKS']) > 1
return False
def world_info_from_env():
local_rank = 0
for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
if v in os.environ:
local_rank = int(os.environ[v])
break
global_rank = 0
for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
if v in os.environ:
global_rank = int(os.environ[v])
break
world_size = 1
for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
if v in os.environ:
world_size = int(os.environ[v])
break
return local_rank, global_rank, world_size
def init_distributed_device(args):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
args.distributed = False
args.world_size = 1
args.rank = 0 # global rank
args.local_rank = 0
result = init_distributed_device_so(
device=getattr(args, 'device', 'cuda'),
dist_backend=getattr(args, 'dist_backend', None),
dist_url=getattr(args, 'dist_url', None),
)
args.device = result['device']
args.world_size = result['world_size']
args.rank = result['global_rank']
args.local_rank = result['local_rank']
args.distributed = result['distributed']
device = torch.device(args.device)
return device
def init_distributed_device_so(
device: str = 'cuda',
dist_backend: Optional[str] = None,
dist_url: Optional[str] = None,
):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
distributed = False
world_size = 1
global_rank = 0
local_rank = 0
device_type, *device_idx = device.split(':', maxsplit=1)
if dist_backend is None:
# FIXME: verify that ROCm transform nccl to rccl
dist_backends = {
"xpu": "ccl",
"hpu": "hccl",
"cuda": "nccl",
"npu": "hccl",
}
dist_backend = dist_backends.get(device_type, 'gloo')
dist_url = dist_url or 'env://'
# TBD, support horovod?
# if args.horovod:
# import horovod.torch as hvd
# assert hvd is not None, "Horovod is not installed"
# hvd.init()
# args.local_rank = int(hvd.local_rank())
# args.rank = hvd.rank()
# args.world_size = hvd.size()
# args.distributed = True
# os.environ['LOCAL_RANK'] = str(args.local_rank)
# os.environ['RANK'] = str(args.rank)
# os.environ['WORLD_SIZE'] = str(args.world_size)
if is_distributed_env():
if 'SLURM_PROCID' in os.environ:
# DDP via SLURM
local_rank, global_rank, world_size = world_info_from_env()
# SLURM var -> torch.distributed vars in case needed
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['RANK'] = str(global_rank)
os.environ['WORLD_SIZE'] = str(world_size)
torch.distributed.init_process_group(
backend=dist_backend,
init_method=dist_url,
world_size=world_size,
rank=global_rank,
)
else:
# DDP via torchrun, torch.distributed.launch
local_rank, _, _ = world_info_from_env()
torch.distributed.init_process_group(
backend=dist_backend,
init_method=dist_url,
)
world_size = torch.distributed.get_world_size()
global_rank = torch.distributed.get_rank()
distributed = True
if device_type == 'cuda':
assert torch.cuda.is_available(), f'CUDA is not available but {device} was specified.'
if device_type == 'npu':
assert torch.npu.is_available(), f'Ascend NPU is not available but {device} was specified.'
if distributed and device != 'cpu':
# Ignore manually specified device index in distributed mode and
# override with resolved local rank, fewer headaches in most setups.
if device_idx:
_logger.warning(f'device index {device_idx[0]} removed from specified ({device}).')
device = f'{device_type}:{local_rank}'
if device.startswith('cuda:'):
torch.cuda.set_device(device)
return dict(
device=device,
global_rank=global_rank,
local_rank=local_rank,
world_size=world_size,
distributed=distributed,
)
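# A minimal usage sketch (illustrative, not part of the original file):
#
#   info = init_distributed_device_so(device='cuda')
#   # Under `torchrun --nproc_per_node=4 train.py`, LOCAL_RANK / RANK / WORLD_SIZE are set in the
#   # environment, so info['distributed'] is True and info['device'] resolves to 'cuda:<local_rank>'.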
| pytorch-image-models/timm/utils/distributed.py/0 | {
"file_path": "pytorch-image-models/timm/utils/distributed.py",
"repo_id": "pytorch-image-models",
"token_count": 2680
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Text-to-SQL
[[open-in-colab]]
In this tutorial, we’ll see how to implement an agent that leverages SQL using `smolagents`.
> Let's start with the golden question: why not keep it simple and use a standard text-to-SQL pipeline?
A standard text-to-SQL pipeline is brittle, since the generated SQL query can be incorrect. Worse, the query could be wrong yet raise no error, silently returning incorrect or useless outputs.
👉 Instead, an agent system is able to critically inspect outputs and decide if the query needs to be changed or not, thus giving it a huge performance boost.
Let’s build this agent! 💪
Run the line below to install required dependencies:
```bash
!pip install smolagents python-dotenv sqlalchemy --upgrade -q
```
To call the HF Inference API, you will need a valid token as your environment variable `HF_TOKEN`.
We use python-dotenv to load it.
```py
from dotenv import load_dotenv
load_dotenv()
```
Then, we setup the SQL environment:
```py
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
Float,
insert,
inspect,
text,
)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
def insert_rows_into_table(rows, table, engine=engine):
for row in rows:
stmt = insert(table).values(**row)
with engine.begin() as connection:
connection.execute(stmt)
table_name = "receipts"
receipts = Table(
table_name,
metadata_obj,
Column("receipt_id", Integer, primary_key=True),
Column("customer_name", String(16), primary_key=True),
Column("price", Float),
Column("tip", Float),
)
metadata_obj.create_all(engine)
rows = [
{"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20},
{"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24},
{"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43},
{"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00},
]
insert_rows_into_table(rows, receipts)
```
### Build our agent
Now let’s make our SQL table retrievable by a tool.
The tool’s description attribute will be embedded in the LLM’s prompt by the agent system: it gives the LLM information about how to use the tool. This is where we want to describe the SQL table.
```py
inspector = inspect(engine)
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")]
table_description = "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info])
print(table_description)
```
```text
Columns:
- receipt_id: INTEGER
- customer_name: VARCHAR(16)
- price: FLOAT
- tip: FLOAT
```
Now let’s build our tool. It needs the following: (read [the tool doc](../tutorials/tools) for more detail)
- A docstring with an `Args:` part listing arguments.
- Type hints on both inputs and output.
```py
from smolagents import tool
@tool
def sql_engine(query: str) -> str:
"""
Allows you to perform SQL queries on the table. Returns a string representation of the result.
The table is named 'receipts'. Its description is as follows:
Columns:
- receipt_id: INTEGER
- customer_name: VARCHAR(16)
- price: FLOAT
- tip: FLOAT
Args:
query: The query to perform. This should be correct SQL.
"""
output = ""
with engine.connect() as con:
rows = con.execute(text(query))
for row in rows:
output += "\n" + str(row)
return output
```
Now let us create an agent that leverages this tool.
We use the `CodeAgent`, which is smolagents’ main agent class: an agent that writes actions in code and can iterate on previous output according to the ReAct framework.
The model is the LLM that powers the agent system. `HfApiModel` allows you to call LLMs using HF’s Inference API, either via Serverless or Dedicated endpoint, but you could also use any proprietary API.
```py
from smolagents import CodeAgent, HfApiModel
agent = CodeAgent(
tools=[sql_engine],
model=HfApiModel("meta-llama/Meta-Llama-3.1-8B-Instruct"),
)
agent.run("Can you give me the name of the client who got the most expensive receipt?")
```
### Level 2: Table joins
Now let’s make it more challenging! We want our agent to handle joins across multiple tables.
So let’s make a second table recording the names of waiters for each receipt_id!
```py
table_name = "waiters"
waiters = Table(
table_name,
metadata_obj,
Column("receipt_id", Integer, primary_key=True),
Column("waiter_name", String(16), primary_key=True),
)
metadata_obj.create_all(engine)
rows = [
{"receipt_id": 1, "waiter_name": "Corey Johnson"},
{"receipt_id": 2, "waiter_name": "Michael Watts"},
{"receipt_id": 3, "waiter_name": "Michael Watts"},
{"receipt_id": 4, "waiter_name": "Margaret James"},
]
insert_rows_into_table(rows, waiters)
```
Since we changed the tables, we update the `sql_engine` tool's description so that the LLM can properly leverage information from both tables.
```py
updated_description = """Allows you to perform SQL queries on the table. Beware that this tool's output is a string representation of the execution output.
It can use the following tables:"""
inspector = inspect(engine)
for table in ["receipts", "waiters"]:
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns(table)]
table_description = f"Table '{table}':\n"
table_description += "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info])
updated_description += "\n\n" + table_description
print(updated_description)
```
Since this request is a bit harder than the previous one, we’ll switch the LLM engine to use the more powerful [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct)!
```py
sql_engine.description = updated_description
agent = CodeAgent(
tools=[sql_engine],
model=HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct"),
)
agent.run("Which waiter got more total money from tips?")
```
It directly works! The setup was surprisingly simple, wasn’t it?
This example is done! We've touched upon these concepts:
- Building new tools.
- Updating a tool's description.
- Switching to a stronger LLM helps agent reasoning.
✅ Now you can go build this text-to-SQL system you’ve always dreamt of! ✨ | smolagents/docs/source/en/examples/text_to_sql.md/0 | {
"file_path": "smolagents/docs/source/en/examples/text_to_sql.md",
"repo_id": "smolagents",
"token_count": 2400
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Agentic RAG
[[open-in-colab]]
Retrieval-Augmented Generation (RAG) is "using an LLM to answer a user query, but basing the answer on information retrieved from a knowledge base". It has many advantages over using a vanilla or fine-tuned LLM: to name a few, it allows grounding the answer in true facts and reducing hallucinations, it allows providing the LLM with domain-specific knowledge, and it allows fine-grained control over access to information from the knowledge base.
But vanilla RAG has limitations, most importantly these two:
- It performs only one retrieval step: if the results are bad, the generation will be bad in turn.
- Semantic similarity is computed with the user query as a reference, which may not be optimal: for instance, the user query will often be a question while the document containing the true answer is phrased affirmatively, so its similarity score may be downgraded compared to other source documents phrased in interrogative form. This creates a risk of missing the relevant information.
We can alleviate these problems by making a RAG agent: very simply, an agent armed with a retriever tool!
This agent will: ✅ formulate the query itself and ✅ critique the results to re-retrieve if needed.
So it should naturally recover some advanced RAG techniques!
- Instead of using the user query directly as the reference in the semantic search, the agent formulates a reference sentence itself that can be closer to the targeted documents, as in [HyDE](https://huggingface.co/papers/2212.10496).
- The agent can use the generated snippets and re-retrieve if needed, as in [Self-Query](https://docs.llamaindex.ai/en/stable/examples/evaluation/RetryQuery/).
Let's build this system. 🛠️
Run the line below to install the required dependencies:
```bash
!pip install smolagents pandas langchain langchain-community sentence-transformers rank_bm25 --upgrade -q
```
To call the HF Inference API, you will need a valid token as your environment variable `HF_TOKEN`.
We use python-dotenv to load it.
```py
from dotenv import load_dotenv
load_dotenv()
```
We first load a knowledge base on which we want to perform RAG: this dataset is a compilation of the documentation pages of several Hugging Face libraries, stored as markdown. We will keep only the documentation for the `transformers` library.
Then prepare the knowledge base by processing the dataset and storing it in a vector database that the retriever will use.
We use [LangChain](https://python.langchain.com/docs/introduction/) for its excellent vector database utilities.
```py
import datasets
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.retrievers import BM25Retriever
knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train")
knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers"))
source_docs = [
Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]})
for doc in knowledge_base
]
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=50,
add_start_index=True,
strip_whitespace=True,
separators=["\n\n", "\n", ".", " ", ""],
)
docs_processed = text_splitter.split_documents(source_docs)
```
Now the documents are ready.
So let's build our agentic RAG system!
👉 We only need a RetrieverTool that our agent can leverage to retrieve information from the knowledge base.
Since we need to add a vectordb as an attribute of the tool, we cannot simply use the plain tool constructor with the `@tool` decorator: so we will follow the setup highlighted in the [tools tutorial](../tutorials/tools).
```py
from smolagents import Tool
class RetrieverTool(Tool):
name = "retriever"
description = "Uses semantic search to retrieve the parts of transformers documentation that could be most relevant to answer your query."
inputs = {
"query": {
"type": "string",
"description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.",
}
}
output_type = "string"
def __init__(self, docs, **kwargs):
super().__init__(**kwargs)
self.retriever = BM25Retriever.from_documents(
docs, k=10
)
def forward(self, query: str) -> str:
assert isinstance(query, str), "Your search query must be a string"
docs = self.retriever.invoke(
query,
)
return "\nRetrieved documents:\n" + "".join(
[
f"\n\n===== Document {str(i)} =====\n" + doc.page_content
for i, doc in enumerate(docs)
]
)
retriever_tool = RetrieverTool(docs_processed)
```
We have used BM25, a classic retrieval method, because it is very easy to set up.
To improve retrieval accuracy, you can replace BM25 with semantic search using vector representations of the documents: you can head to the [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) to pick a good embedding model.
Now it is straightforward to create an agent that leverages this `retriever_tool`!
The agent will need these arguments upon initialization:
- `tools`: a list of tools that the agent will be able to call.
- `model`: the LLM that powers the agent.
Our `model` must be a callable that takes a list of messages as input and returns text. It also needs to accept a stop_sequences argument that indicates when to stop its generation. For convenience, we directly use the `HfApiModel` class provided in the package to get an LLM engine that calls Hugging Face's Inference API.
And we use [meta-llama/Llama-3.3-70B-Instruct](meta-llama/Llama-3.3-70B-Instruct) as the LLM engine because:
- It has a long 128k context, which is helpful for processing long source documents
- It is served for free at all times on HF's Inference API!
_Note:_ The Inference API hosts models based on various criteria, and deployed models may be updated or replaced without prior notice. Read more about it [here](https://huggingface.co/docs/api-inference/supported-models).
```py
from smolagents import HfApiModel, CodeAgent
agent = CodeAgent(
tools=[retriever_tool], model=HfApiModel("meta-llama/Llama-3.3-70B-Instruct"), max_steps=4, verbosity_level=2
)
```
Upon initializing the CodeAgent, it is automatically given a default system prompt that tells the LLM engine to process step by step and to generate tool calls as code snippets, but you can replace this prompt template with your own as needed.
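For instance, you can inspect the default prompt before deciding whether to override it (a minimal sketch; the `prompt_templates` attribute name is an assumption based on recent smolagents releases and may differ in older versions):
```py
# Print the default system prompt used by the agent
print(agent.prompt_templates["system_prompt"])
```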
Then, when its `.run()` method is launched, the agent takes care of calling the LLM engine and executing the tool calls, all in a loop that ends only when the final_answer tool is called with the final answer as its argument.
```py
agent_output = agent.run("For a transformers model training, which is slower, the forward or the backward pass?")
print("Final output:")
print(agent_output)
```
| smolagents/docs/source/hi/examples/rag.md/0 | {
"file_path": "smolagents/docs/source/hi/examples/rag.md",
"repo_id": "smolagents",
"token_count": 7786
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Text-to-SQL
[[open-in-colab]]
In this tutorial, we'll see how to implement an agent that leverages SQL using `smolagents`.
> Let's start with the classic question: why not simply use a standard text-to-SQL pipeline?
A standard text-to-SQL pipeline is brittle, since the generated SQL query can be incorrect. Even worse, the query could be wrong without raising an error, returning incorrect or useless results without any warning.
👉 Instead, an agent system can critically inspect the outputs and decide whether the query needs to be changed, which gives a huge performance boost.
Let's build this agent together! 💪
First, we set up a SQL environment:
```py
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
Float,
insert,
inspect,
text,
)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
# create city SQL table
table_name = "receipts"
receipts = Table(
table_name,
metadata_obj,
Column("receipt_id", Integer, primary_key=True),
Column("customer_name", String(16), primary_key=True),
Column("price", Float),
Column("tip", Float),
)
metadata_obj.create_all(engine)
rows = [
{"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20},
{"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24},
{"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43},
{"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00},
]
for row in rows:
stmt = insert(receipts).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
```
### Build the agent
Now let's build an agent that answers questions using SQL queries. The tool's description attribute will be embedded in the LLM's prompt by the agent system: it gives the LLM information about how to use the tool. This is where we describe the SQL table.
```py
inspector = inspect(engine)
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")]
table_description = "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info])
print(table_description)
```
```text
Columns:
- receipt_id: INTEGER
- customer_name: VARCHAR(16)
- price: FLOAT
- tip: FLOAT
```
Now let's build our tool. It needs the following (see the [tool documentation](../tutorials/tools) for more details):
- A docstring with an `Args:` section listing the arguments.
- Type hints on both inputs and output.
```py
from smolagents import tool
@tool
def sql_engine(query: str) -> str:
"""
Allows you to perform SQL queries on the table. Returns a string representation of the result.
The table is named 'receipts'. Its description is as follows:
Columns:
- receipt_id: INTEGER
- customer_name: VARCHAR(16)
- price: FLOAT
- tip: FLOAT
Args:
query: The query to perform. This should be correct SQL.
"""
output = ""
with engine.connect() as con:
rows = con.execute(text(query))
for row in rows:
output += "\n" + str(row)
return output
```
We now use this tool to create an agent. We use `CodeAgent`, smolagents' main agent class: an agent that writes its actions in code and iterates on previous outputs following the ReAct framework.
The model is the LLM that powers the agent system. `HfApiModel` lets you call LLMs through the HF Inference API, either via a Serverless or a Dedicated endpoint, but you could also use any proprietary API.
```py
from smolagents import CodeAgent, HfApiModel
agent = CodeAgent(
tools=[sql_engine],
model=HfApiModel("meta-llama/Meta-Llama-3.1-8B-Instruct"),
)
agent.run("Can you give me the name of the client who got the most expensive receipt?")
```
### Level 2: Table joins
Now let's add some challenge! We want our agent to handle joins across multiple tables. So let's create a new table recording the waiter name for each receipt_id!
```py
table_name = "waiters"
waiters = Table(
    table_name,
    metadata_obj,
    Column("receipt_id", Integer, primary_key=True),
    Column("waiter_name", String(16), primary_key=True),
)
metadata_obj.create_all(engine)
rows = [
    {"receipt_id": 1, "waiter_name": "Corey Johnson"},
    {"receipt_id": 2, "waiter_name": "Michael Watts"},
    {"receipt_id": 3, "waiter_name": "Michael Watts"},
    {"receipt_id": 4, "waiter_name": "Margaret James"},
]
for row in rows:
    stmt = insert(waiters).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
```
Since we changed the tables, we update the `sql_engine` tool's description so the LLM can properly leverage the information from this new table.
```py
updated_description = """Allows you to perform SQL queries on the table. Beware that this tool's output is a string representation of the execution output.
It can use the following tables:"""
inspector = inspect(engine)
for table in ["receipts", "waiters"]:
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns(table)]
table_description = f"Table '{table}':\n"
table_description += "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info])
updated_description += "\n\n" + table_description
print(updated_description)
```
Since this request is a bit harder than the previous one, we'll switch the LLM engine to the more powerful [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct)!
```py
sql_engine.description = updated_description
agent = CodeAgent(
tools=[sql_engine],
model=HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct"),
)
agent.run("Which waiter got more total money from tips?")
```
It works right away! The setup was surprisingly simple, wasn't it?
This example is done! We've covered these concepts:
- Building new tools.
- Updating a tool's description.
- Switching to a stronger LLM helps agent reasoning.
✅ Now you can go build the text-to-SQL system you've always dreamt of! ✨
| smolagents/docs/source/zh/examples/text_to_sql.md/0 | {
"file_path": "smolagents/docs/source/zh/examples/text_to_sql.md",
"repo_id": "smolagents",
"token_count": 3136
} |
<jupyter_start><jupyter_code># !pip install plotly kaleido datasets nbformat -U -q
import os
import datasets
import pandas as pd
from dotenv import load_dotenv
from huggingface_hub import login
load_dotenv(override=True)
login(os.getenv("HF_TOKEN"))
pd.set_option("max_colwidth", None)
OUTPUT_DIR = "output"
eval_ds = datasets.load_dataset("gaia-benchmark/GAIA", "2023_all")["validation"]
eval_ds = eval_ds.rename_columns({"Question": "question", "Final answer": "true_answer", "Level": "task"})
eval_df = pd.DataFrame(eval_ds)
pd.Series(eval_ds["task"]).value_counts()<jupyter_output><empty_output><jupyter_text>1. Load all results<jupyter_code>import glob
results = []
for f in glob.glob(f"{OUTPUT_DIR}/validation/*.jsonl"):
df = pd.read_json(f, lines=True)
df["agent_name"] = f.split("/")[-1].split(".")[0]
results.append(df)
result_df = pd.concat(results)
result_df = result_df.drop(columns=["start_time", "end_time"])
result_df["prediction"] = result_df["prediction"].fillna("No prediction")
import re
from collections import Counter
from scripts.gaia_scorer import check_close_call, question_scorer
result_df["is_correct"] = result_df.apply(lambda x: question_scorer(x["prediction"], x["true_answer"]), axis=1)
result_df["is_near_correct"] = result_df.apply(
lambda x: check_close_call(x["prediction"], x["true_answer"], x["is_correct"]),
axis=1,
)
result_df["count_steps"] = result_df["intermediate_steps"].apply(len)
def find_attachment(question):
matches = eval_df.loc[eval_df["question"].apply(lambda x: x in question), "file_name"]
if len(matches) == 0:
return "Not found"
file_path = matches.values[0]
if isinstance(file_path, str) and len(file_path) > 0:
return file_path.split(".")[-1]
else:
return "None"
result_df["attachment_type"] = result_df["question"].apply(find_attachment)
def extract_tool_calls(code):
regex = r"\b(\w+)\("
function_calls = [el for el in re.findall(regex, code) if el.islower()]
function_call_counter = Counter(function_calls)
return function_call_counter
def sum_tool_calls(steps):
total_count = Counter()
for step in steps:
if "llm_output" in step:
total_count += extract_tool_calls(step["llm_output"])
return total_count
# result_df["tool_calls"] = result_df["intermediate_steps"].apply(sum_tool_calls)
def get_thoughts(x):
try:
output = x[0]["task"]
for y in x[1:]:
try:
if "observation" in y:
output += y["llm_output"] + "\nObservation:" + y["observation"]
else:
output += y["llm_output"] + r"\Error:" + str(y["error"])
except Exception:
pass
return output
except Exception:
return None
result_df["thoughts"] = result_df["intermediate_steps"].apply(lambda x: get_thoughts(x))
result_df["agent_name"].value_counts()<jupyter_output><empty_output><jupyter_text>2. Inspect specific runs<jupyter_code>o1_vision = "code_o1_25-01_visioon"
o1_next = "code_o1_29-01_text"
o1 = "code_o1_01_february_text"
list_versions = [o1, o1_vision, o1_next]
# submission_selection_name = "react_code_llama3-70b_02-05_full-gaia-validation-code"
sel_df = result_df
# sel_df = sel_df.loc[
# (result_df["agent_name"].isin(list_versions))
# # & (~result_df["question"].isin(UNSOLVED_QUESTIONS))
# ]
sel_df = sel_df.reset_index(drop=True)
display(sel_df["agent_name"].value_counts())
sel_df = sel_df.drop_duplicates(subset=["agent_name", "question"])
display(sel_df.groupby("agent_name")[["task"]].value_counts())
print("Total length:", len(sel_df), "- is complete:", len(sel_df) == 165)
# assert sel_df["question"].value_counts().max() == len(list_versions), "Some questions are duplicate!"
display("Average score:", sel_df.groupby("agent_name")[["is_correct"]].mean().round(3))
display(
sel_df.groupby(["agent_name", "task"])[["is_correct", "is_near_correct", "count_steps", "question"]]
.agg(
{
"is_correct": "mean",
"is_near_correct": "mean",
"count_steps": "mean",
"question": "count",
}
)
.rename(columns={"question": "count"})
)
import plotly.express as px
cumulative_df = (
(
sel_df.groupby("agent_name")[["is_correct", "is_near_correct"]]
.expanding(min_periods=1, axis=0, method="single")
.agg({"is_correct": "mean", "is_near_correct": "count"})
.reset_index()
)
.copy()
.rename(columns={"is_near_correct": "index"})
)
cumulative_df["index"] = cumulative_df["index"].astype(int) - 1
def find_question(row):
try:
res = sel_df.loc[sel_df["agent_name"] == row["agent_name"], "question"].iloc[row["index"]][:50]
return res
except Exception:
return ""
cumulative_df["question"] = cumulative_df.apply(find_question, axis=1)
# cumulative_df["question"] = [el[:50] for el in sel_df["question"].values]
# cumulative_df["is_correct"] = cumulative_df["is_correct"] * (165 - 68) / 165
px.line(
cumulative_df,
color="agent_name",
x="index",
y="is_correct",
hover_data="question",
)<jupyter_output><empty_output><jupyter_text>3. Dive deeper into one run<jupyter_code>sel_df = result_df.loc[result_df["agent_name"] == o1]
print(len(sel_df))<jupyter_output>165<jupyter_text>Count errors<jupyter_code>import numpy as np
error_types = [
"AgentParsingError",
"AgentExecutionError",
"AgentMaxIterationsError",
"AgentGenerationError",
]
sel_df[error_types] = 0
sel_df["Count steps"] = np.nan
def count_errors(row):
if isinstance(row["intermediate_steps"], list):
row["Count steps"] = len(row["intermediate_steps"])
for step in row["intermediate_steps"]:
if isinstance(step, dict) and "error" in step:
try:
row[str(step["error"]["error_type"])] += 1
except Exception:
pass
return row
sel_df = sel_df.apply(count_errors, axis=1)
import plotly.express as px
aggregate_errors = (
sel_df.groupby(["is_correct"])[error_types + ["Count steps"]].mean().reset_index().melt(id_vars=["is_correct"])
)
fig = px.bar(
aggregate_errors,
y="value",
x="variable",
color="is_correct",
labels={
"agent_name": "<b>Model</b>",
"task": "<b>Level</b>",
"aggregate_score": "<b>Performance</b>",
"value": "<b>Average count</b>",
"eval_score_GPT4": "<b>Score</b>",
},
)
fig.update_layout(
height=500,
width=800,
barmode="group",
bargroupgap=0.0,
)
fig.update_traces(textposition="outside")
fig.write_image("aggregate_errors.png", scale=3)
fig.show()<jupyter_output><empty_output><jupyter_text>Inspect result by file extension type<jupyter_code>display(
result_df.groupby(["attachment_type"])[["is_correct", "count_steps", "question"]].agg(
{"is_correct": "mean", "count_steps": "mean", "question": "count"}
)
)<jupyter_output><empty_output><jupyter_text>4. Ensembling methods<jupyter_code>counts = result_df["agent_name"].value_counts()
long_series = result_df.loc[result_df["agent_name"].isin(counts[counts > 140].index)]
def majority_vote(df):
df = df[(df["prediction"] != "Unable to determine") & (~df["prediction"].isna()) & (df["prediction"] != "None")]
answer_modes = df.groupby("question")["prediction"].agg(lambda x: x.mode()[0]).reset_index()
first_occurrences = (
df.groupby(["question", "prediction"]).agg({"task": "first", "is_correct": "first"}).reset_index()
)
result = answer_modes.merge(first_occurrences, on=["question", "prediction"], how="left")
return result
def oracle(df):
def get_first_correct_or_first_wrong(group):
correct_answers = group[group["is_correct"]]
if len(correct_answers) > 0:
return correct_answers.iloc[0]
return group.iloc[0]
result = df.groupby("question").apply(get_first_correct_or_first_wrong)
return result.reset_index(drop=True)
display((long_series.groupby("agent_name")["is_correct"].mean() * 100).round(2))
print(f"Majority score: {majority_vote(long_series)['is_correct'].mean() * 100:.2f}")
print(f"Oracle score: {oracle(long_series)['is_correct'].mean() * 100:.2f}")<jupyter_output><empty_output><jupyter_text>Submit<jupyter_code>agent_run = "code_o1_04_february_submission5.jsonl"
df = pd.read_json(f"output/validation/{agent_run}", lines=True)
df = df[["task_id", "prediction", "intermediate_steps"]]
df = df.rename(columns={"prediction": "model_answer", "intermediate_steps": "reasoning_trace"})
df.to_json("submission.jsonl", orient="records", lines=True)<jupyter_output><empty_output> | smolagents/examples/open_deep_research/analysis.ipynb/0 | {
"file_path": "smolagents/examples/open_deep_research/analysis.ipynb",
"repo_id": "smolagents",
"token_count": 3691
} |
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "smolagents"
version = "1.9.0.dev0"
description = "🤗 smolagents: a barebones library for agents. Agents write python code to call tools or orchestrate other agents."
authors = [
{ name="Aymeric Roucher", email="[email protected]" }, { name="Thomas Wolf"},
]
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"huggingface-hub>=0.28.0",
"requests>=2.32.3",
"rich>=13.9.4",
"pandas>=2.2.3",
"jinja2>=3.1.4",
"pillow>=11.0.0",
"markdownify>=0.14.1",
"duckduckgo-search>=6.3.7",
"python-dotenv"
]
[project.optional-dependencies]
torch = [
"torch",
"torchvision",
]
audio = [
"soundfile",
"smolagents[torch]",
]
e2b = [
"e2b-code-interpreter>=1.0.3",
"python-dotenv>=1.0.1",
]
gradio = [
"gradio>=5.13.2",
]
litellm = [
"litellm>=1.60.2",
]
mcp = [
"mcpadapt>=0.0.6",
"mcp",
]
openai = [
"openai>=1.58.1"
]
telemetry = [
"arize-phoenix",
"opentelemetry-sdk",
"opentelemetry-exporter-otlp",
"openinference-instrumentation-smolagents>=0.1.4"
]
transformers = [
"accelerate",
"transformers>=4.0.0",
"smolagents[torch]",
]
all = [
"smolagents[audio,e2b,gradio,litellm,mcp,openai,telemetry,transformers]",
]
quality = [
"ruff>=0.9.0",
]
test = [
"ipython>=8.31.0", # for interactive environment tests
"pytest>=8.1.0",
"python-dotenv>=1.0.1", # For test_all_docs
"smolagents[all]",
"rank-bm25", # For test_all_docs
]
dev = [
"smolagents[quality,test]",
"sqlalchemy", # for ./examples
]
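# Illustrative note (not part of the original file): the optional dependency groups above are
# installed with pip's extras syntax, e.g. `pip install "smolagents[litellm]"` or `pip install "smolagents[all]"`.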
[tool.pytest.ini_options]
# Add the specified `OPTS` to the set of command line arguments as if they had been specified by the user.
addopts = "-sv --durations=0"
[tool.ruff]
line-length = 119
lint.ignore = [
"F403", # undefined-local-with-import-star
"E501", # line-too-long
]
lint.select = ["E", "F", "I", "W"]
[tool.ruff.lint.per-file-ignores]
"examples/*" = [
"E402", # module-import-not-at-top-of-file
]
[tool.ruff.lint.isort]
known-first-party = ["smolagents"]
lines-after-imports = 2
[tool.setuptools.package-data]
"smolagents.prompts" = ["*.yaml"]
[project.scripts]
smolagent = "smolagents.cli:main"
webagent = "smolagents.vision_web_browser:main" | smolagents/pyproject.toml/0 | {
"file_path": "smolagents/pyproject.toml",
"repo_id": "smolagents",
"token_count": 995
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import importlib
import inspect
import json
import logging
import os
import sys
import tempfile
import textwrap
from contextlib import contextmanager
from functools import wraps
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
from huggingface_hub import (
create_repo,
get_collection,
hf_hub_download,
metadata_update,
upload_folder,
)
from huggingface_hub.utils import is_torch_available
from ._function_type_hints_utils import (
TypeHintParsingException,
_convert_type_hints_to_json_schema,
get_imports,
get_json_schema,
)
from .agent_types import handle_agent_input_types, handle_agent_output_types
from .tool_validation import MethodChecker, validate_tool_attributes
from .utils import _is_package_available, _is_pillow_available, get_source, instance_to_source
logger = logging.getLogger(__name__)
def validate_after_init(cls):
original_init = cls.__init__
@wraps(original_init)
def new_init(self, *args, **kwargs):
original_init(self, *args, **kwargs)
self.validate_arguments()
cls.__init__ = new_init
return cls
AUTHORIZED_TYPES = [
"string",
"boolean",
"integer",
"number",
"image",
"audio",
"array",
"object",
"any",
"null",
]
CONVERSION_DICT = {"str": "string", "int": "integer", "float": "number"}
class Tool:
"""
A base class for the functions used by the agent. Subclass this and implement the `forward` method as well as the
following class attributes:
- **description** (`str`) -- A short description of what your tool does, the inputs it expects and the output(s) it
will return. For instance 'This is a tool that downloads a file from a `url`. It takes the `url` as input, and
returns the text contained in the file'.
- **name** (`str`) -- A performative name that will be used for your tool in the prompt to the agent. For instance
`"text-classifier"` or `"image_generator"`.
- **inputs** (`Dict[str, Dict[str, Union[str, type]]]`) -- The dict of modalities expected for the inputs.
    It has one `type` key and a `description` key.
This is used by `launch_gradio_demo` or to make a nice space from your tool, and also can be used in the generated
description for your tool.
- **output_type** (`type`) -- The type of the tool output. This is used by `launch_gradio_demo`
or to make a nice space from your tool, and also can be used in the generated description for your tool.
You can also override the method [`~Tool.setup`] if your tool has an expensive operation to perform before being
usable (such as loading a model). [`~Tool.setup`] will be called the first time you use your tool, but not at
instantiation.
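    Example (an illustrative minimal tool, not one shipped with the library):
    ```py
    >>> from smolagents import Tool
    >>> class TextReverserTool(Tool):
    ...     name = "text_reverser"
    ...     description = "Reverses the input text."
    ...     inputs = {"text": {"type": "string", "description": "The text to reverse."}}
    ...     output_type = "string"
    ...     def forward(self, text: str) -> str:
    ...         return text[::-1]
    >>> reverser = TextReverserTool()
    >>> reverser("hello")
    'olleh'
    ```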
"""
name: str
description: str
inputs: Dict[str, Dict[str, Union[str, type, bool]]]
output_type: str
def __init__(self, *args, **kwargs):
self.is_initialized = False
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
validate_after_init(cls)
def validate_arguments(self):
required_attributes = {
"description": str,
"name": str,
"inputs": dict,
"output_type": str,
}
for attr, expected_type in required_attributes.items():
attr_value = getattr(self, attr, None)
if attr_value is None:
raise TypeError(f"You must set an attribute {attr}.")
if not isinstance(attr_value, expected_type):
raise TypeError(
f"Attribute {attr} should have type {expected_type.__name__}, got {type(attr_value)} instead."
)
for input_name, input_content in self.inputs.items():
assert isinstance(input_content, dict), f"Input '{input_name}' should be a dictionary."
assert "type" in input_content and "description" in input_content, (
f"Input '{input_name}' should have keys 'type' and 'description', has only {list(input_content.keys())}."
)
if input_content["type"] not in AUTHORIZED_TYPES:
raise Exception(
f"Input '{input_name}': type '{input_content['type']}' is not an authorized value, should be one of {AUTHORIZED_TYPES}."
)
assert getattr(self, "output_type", None) in AUTHORIZED_TYPES
# Validate forward function signature, except for Tools that use a "generic" signature (PipelineTool, SpaceToolWrapper, LangChainToolWrapper)
if not (
hasattr(self, "skip_forward_signature_validation")
and getattr(self, "skip_forward_signature_validation") is True
):
signature = inspect.signature(self.forward)
if not set(signature.parameters.keys()) == set(self.inputs.keys()):
raise Exception(
"Tool's 'forward' method should take 'self' as its first argument, then its next arguments should match the keys of tool attribute 'inputs'."
)
json_schema = _convert_type_hints_to_json_schema(self.forward, error_on_missing_type_hints=False)[
"properties"
] # This function will not raise an error on missing docstrings, contrary to get_json_schema
for key, value in self.inputs.items():
assert key in json_schema, (
f"Input '{key}' should be present in function signature, found only {json_schema.keys()}"
)
if "nullable" in value:
assert "nullable" in json_schema[key], (
f"Nullable argument '{key}' in inputs should have key 'nullable' set to True in function signature."
)
if key in json_schema and "nullable" in json_schema[key]:
assert "nullable" in value, (
f"Nullable argument '{key}' in function signature should have key 'nullable' set to True in inputs."
)
def forward(self, *args, **kwargs):
        raise NotImplementedError("Write this method in your subclass of `Tool`.")
def __call__(self, *args, sanitize_inputs_outputs: bool = False, **kwargs):
if not self.is_initialized:
self.setup()
        # Handle the case where the arguments might be passed as a single dictionary
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], dict):
potential_kwargs = args[0]
# If the dictionary keys match our input parameters, convert it to kwargs
if all(key in self.inputs for key in potential_kwargs):
args = ()
kwargs = potential_kwargs
if sanitize_inputs_outputs:
args, kwargs = handle_agent_input_types(*args, **kwargs)
outputs = self.forward(*args, **kwargs)
if sanitize_inputs_outputs:
outputs = handle_agent_output_types(outputs, self.output_type)
return outputs
def setup(self):
"""
Overwrite this method here for any operation that is expensive and needs to be executed before you start using
your tool. Such as loading a big model.
"""
self.is_initialized = True
def save(self, output_dir):
"""
Saves the relevant code files for your tool so it can be pushed to the Hub. This will copy the code of your
tool in `output_dir` as well as autogenerate:
- a `tool.py` file containing the logic for your tool.
        - an `app.py` file providing a UI for your tool when it is exported to a Space with `tool.push_to_hub()`
        - a `requirements.txt` containing the names of the modules used by your tool (as detected when inspecting its
code)
Args:
output_dir (`str`): The folder in which you want to save your tool.
"""
os.makedirs(output_dir, exist_ok=True)
class_name = self.__class__.__name__
tool_file = os.path.join(output_dir, "tool.py")
# Save tool file
if type(self).__name__ == "SimpleTool":
# Check that imports are self-contained
source_code = get_source(self.forward).replace("@tool", "")
forward_node = ast.parse(source_code)
# If tool was created using '@tool' decorator, it has only a forward pass, so it's simpler to just get its code
method_checker = MethodChecker(set())
method_checker.visit(forward_node)
if len(method_checker.errors) > 0:
raise (ValueError("\n".join(method_checker.errors)))
forward_source_code = get_source(self.forward)
tool_code = textwrap.dedent(
f"""
from smolagents import Tool
from typing import Optional
class {class_name}(Tool):
name = "{self.name}"
description = "{self.description}"
inputs = {json.dumps(self.inputs, separators=(",", ":"))}
output_type = "{self.output_type}"
"""
).strip()
import re
def add_self_argument(source_code: str) -> str:
"""Add 'self' as first argument to a function definition if not present."""
pattern = r"def forward\(((?!self)[^)]*)\)"
def replacement(match):
args = match.group(1).strip()
if args: # If there are other arguments
return f"def forward(self, {args})"
return "def forward(self)"
return re.sub(pattern, replacement, source_code)
forward_source_code = forward_source_code.replace(self.name, "forward")
forward_source_code = add_self_argument(forward_source_code)
forward_source_code = forward_source_code.replace("@tool", "").strip()
tool_code += "\n\n" + textwrap.indent(forward_source_code, " ")
else: # If the tool was not created by the @tool decorator, it was made by subclassing Tool
if type(self).__name__ in [
"SpaceToolWrapper",
"LangChainToolWrapper",
"GradioToolWrapper",
]:
raise ValueError(
"Cannot save objects created with from_space, from_langchain or from_gradio, as this would create errors."
)
validate_tool_attributes(self.__class__)
tool_code = instance_to_source(self, base_cls=Tool)
with open(tool_file, "w", encoding="utf-8") as f:
f.write(tool_code.replace(":true,", ":True,").replace(":true}", ":True}"))
# Save app file
app_file = os.path.join(output_dir, "app.py")
with open(app_file, "w", encoding="utf-8") as f:
f.write(
textwrap.dedent(
f"""
from smolagents import launch_gradio_demo
from typing import Optional
from tool import {class_name}
tool = {class_name}()
launch_gradio_demo(tool)
"""
).lstrip()
)
# Save requirements file
imports = {el for el in get_imports(tool_file) if el not in sys.stdlib_module_names} | {"smolagents"}
requirements_file = os.path.join(output_dir, "requirements.txt")
with open(requirements_file, "w", encoding="utf-8") as f:
f.write("\n".join(imports) + "\n")
def push_to_hub(
self,
repo_id: str,
commit_message: str = "Upload tool",
private: Optional[bool] = None,
token: Optional[Union[bool, str]] = None,
create_pr: bool = False,
) -> str:
"""
Upload the tool to the Hub.
For this method to work properly, your tool must have been defined in a separate module (not `__main__`).
For instance:
```
from my_tool_module import MyTool
my_tool = MyTool()
my_tool.push_to_hub("my-username/my-space")
```
Parameters:
repo_id (`str`):
The name of the repository you want to push your tool to. It should contain your organization name when
pushing to a given organization.
commit_message (`str`, *optional*, defaults to `"Upload tool"`):
Message to commit while pushing.
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
token (`bool` or `str`, *optional*):
The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated
when running `huggingface-cli login` (stored in `~/.huggingface`).
create_pr (`bool`, *optional*, defaults to `False`):
Whether or not to create a PR with the uploaded files or directly commit.
"""
repo_url = create_repo(
repo_id=repo_id,
token=token,
private=private,
exist_ok=True,
repo_type="space",
space_sdk="gradio",
)
repo_id = repo_url.repo_id
metadata_update(repo_id, {"tags": ["tool"]}, repo_type="space", token=token)
with tempfile.TemporaryDirectory() as work_dir:
# Save all files.
self.save(work_dir)
with open(work_dir + "/tool.py", "r") as f:
print("\n".join(f.readlines()))
logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}")
return upload_folder(
repo_id=repo_id,
commit_message=commit_message,
folder_path=work_dir,
token=token,
create_pr=create_pr,
repo_type="space",
)
@classmethod
def from_hub(
cls,
repo_id: str,
token: Optional[str] = None,
trust_remote_code: bool = False,
**kwargs,
):
"""
Loads a tool defined on the Hub.
<Tip warning={true}>
Loading a tool from the Hub means that you'll download the tool and execute it locally.
ALWAYS inspect the tool you're downloading before loading it within your runtime, as you would do when
installing a package using pip/npm/apt.
</Tip>
Args:
repo_id (`str`):
The name of the repo on the Hub where your tool is defined.
token (`str`, *optional*):
The token to identify you on hf.co. If unset, will use the token generated when running
`huggingface-cli login` (stored in `~/.huggingface`).
            trust_remote_code (`bool`, *optional*, defaults to False):
                This flag marks that you understand the risk of running remote code and that you trust this tool.
                If this is not set to True, loading the tool from the Hub will fail.
kwargs (additional keyword arguments, *optional*):
Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as
`cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the
others will be passed along to its init.
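        Example (illustrative; the repo id below is a placeholder for a real Space that contains a tool):
        ```py
        >>> from smolagents import Tool
        >>> my_tool = Tool.from_hub("your-username/my-tool", trust_remote_code=True)
        ```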
"""
if not trust_remote_code:
raise ValueError(
"Loading a tool from Hub requires to trust remote code. Make sure you've inspected the repo and pass `trust_remote_code=True` to load the tool."
)
# Get the tool's tool.py file.
tool_file = hf_hub_download(
repo_id,
"tool.py",
token=token,
repo_type="space",
cache_dir=kwargs.get("cache_dir"),
force_download=kwargs.get("force_download"),
resume_download=kwargs.get("resume_download"),
proxies=kwargs.get("proxies"),
revision=kwargs.get("revision"),
subfolder=kwargs.get("subfolder"),
local_files_only=kwargs.get("local_files_only"),
)
tool_code = Path(tool_file).read_text()
# Find the Tool subclass in the namespace
with tempfile.TemporaryDirectory() as temp_dir:
# Save the code to a file
module_path = os.path.join(temp_dir, "tool.py")
with open(module_path, "w") as f:
f.write(tool_code)
print("TOOL CODE:\n", tool_code)
# Load module from file path
spec = importlib.util.spec_from_file_location("tool", module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# Find and instantiate the Tool class
for item_name in dir(module):
item = getattr(module, item_name)
if isinstance(item, type) and issubclass(item, Tool) and item != Tool:
tool_class = item
break
if tool_class is None:
raise ValueError("No Tool subclass found in the code.")
if not isinstance(tool_class.inputs, dict):
tool_class.inputs = ast.literal_eval(tool_class.inputs)
return tool_class(**kwargs)
@staticmethod
def from_space(
space_id: str,
name: str,
description: str,
api_name: Optional[str] = None,
token: Optional[str] = None,
):
"""
Creates a [`Tool`] from a Space given its id on the Hub.
Args:
space_id (`str`):
The id of the Space on the Hub.
name (`str`):
The name of the tool.
description (`str`):
The description of the tool.
api_name (`str`, *optional*):
                The specific api_name to use, if the space has several tabs. If not specified, will default to the first available api.
token (`str`, *optional*):
Add your token to access private spaces or increase your GPU quotas.
Returns:
[`Tool`]:
The Space, as a tool.
Examples:
```py
>>> image_generator = Tool.from_space(
... space_id="black-forest-labs/FLUX.1-schnell",
... name="image-generator",
... description="Generate an image from a prompt"
... )
>>> image = image_generator("Generate an image of a cool surfer in Tahiti")
```
```py
>>> face_swapper = Tool.from_space(
... "tuan2308/face-swap",
... "face_swapper",
... "Tool that puts the face shown on the first image on the second image. You can give it paths to images.",
... )
>>> image = face_swapper('./aymeric.jpeg', './ruth.jpg')
```
"""
from gradio_client import Client, handle_file
class SpaceToolWrapper(Tool):
skip_forward_signature_validation = True
def __init__(
self,
space_id: str,
name: str,
description: str,
api_name: Optional[str] = None,
token: Optional[str] = None,
):
self.name = name
self.description = description
self.client = Client(space_id, hf_token=token)
space_description = self.client.view_api(return_format="dict", print_info=False)["named_endpoints"]
# If api_name is not defined, take the first of the available APIs for this space
if api_name is None:
api_name = list(space_description.keys())[0]
logger.warning(
f"Since `api_name` was not defined, it was automatically set to the first available API: `{api_name}`."
)
self.api_name = api_name
try:
space_description_api = space_description[api_name]
except KeyError:
raise KeyError(f"Could not find specified {api_name=} among available api names.")
self.inputs = {}
for parameter in space_description_api["parameters"]:
if not parameter["parameter_has_default"]:
parameter_type = parameter["type"]["type"]
if parameter_type == "object":
parameter_type = "any"
self.inputs[parameter["parameter_name"]] = {
"type": parameter_type,
"description": parameter["python_type"]["description"],
}
output_component = space_description_api["returns"][0]["component"]
if output_component == "Image":
self.output_type = "image"
elif output_component == "Audio":
self.output_type = "audio"
else:
self.output_type = "any"
self.is_initialized = True
def sanitize_argument_for_prediction(self, arg):
from gradio_client.utils import is_http_url_like
if _is_pillow_available():
from PIL.Image import Image
if _is_pillow_available() and isinstance(arg, Image):
temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
arg.save(temp_file.name)
arg = temp_file.name
if (
(isinstance(arg, str) and os.path.isfile(arg))
or (isinstance(arg, Path) and arg.exists() and arg.is_file())
or is_http_url_like(arg)
):
arg = handle_file(arg)
return arg
def forward(self, *args, **kwargs):
# Preprocess args and kwargs:
args = list(args)
for i, arg in enumerate(args):
args[i] = self.sanitize_argument_for_prediction(arg)
for arg_name, arg in kwargs.items():
kwargs[arg_name] = self.sanitize_argument_for_prediction(arg)
output = self.client.predict(*args, api_name=self.api_name, **kwargs)
                if isinstance(output, (tuple, list)):
                    # Sometimes the Space also returns the generation seed, in which case the result is at index 0
                    return output[0]
                return output
return SpaceToolWrapper(
space_id=space_id,
name=name,
description=description,
api_name=api_name,
token=token,
)
@staticmethod
def from_gradio(gradio_tool):
"""
Creates a [`Tool`] from a gradio tool.
"""
import inspect
class GradioToolWrapper(Tool):
def __init__(self, _gradio_tool):
self.name = _gradio_tool.name
self.description = _gradio_tool.description
self.output_type = "string"
self._gradio_tool = _gradio_tool
func_args = list(inspect.signature(_gradio_tool.run).parameters.items())
self.inputs = {
key: {"type": CONVERSION_DICT[value.annotation], "description": ""} for key, value in func_args
}
self.forward = self._gradio_tool.run
return GradioToolWrapper(gradio_tool)
@staticmethod
def from_langchain(langchain_tool):
"""
Creates a [`Tool`] from a langchain tool.
"""
class LangChainToolWrapper(Tool):
skip_forward_signature_validation = True
def __init__(self, _langchain_tool):
self.name = _langchain_tool.name.lower()
self.description = _langchain_tool.description
self.inputs = _langchain_tool.args.copy()
for input_content in self.inputs.values():
if "title" in input_content:
input_content.pop("title")
input_content["description"] = ""
self.output_type = "string"
self.langchain_tool = _langchain_tool
self.is_initialized = True
def forward(self, *args, **kwargs):
tool_input = kwargs.copy()
for index, argument in enumerate(args):
if index < len(self.inputs):
input_key = next(iter(self.inputs))
tool_input[input_key] = argument
return self.langchain_tool.run(tool_input)
return LangChainToolWrapper(langchain_tool)
def launch_gradio_demo(tool: Tool):
"""
Launches a gradio demo for a tool. The corresponding tool class needs to properly implement the class attributes
`inputs` and `output_type`.
Args:
tool (`type`): The tool for which to launch the demo.
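    Example (illustrative; `my_tool_module` and `MyTool` are placeholders for your own code):
    ```py
    >>> from smolagents import launch_gradio_demo
    >>> from my_tool_module import MyTool
    >>> launch_gradio_demo(MyTool())
    ```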
"""
try:
import gradio as gr
except ImportError:
raise ImportError("Gradio should be installed in order to launch a gradio demo.")
TYPE_TO_COMPONENT_CLASS_MAPPING = {
"image": gr.Image,
"audio": gr.Audio,
"string": gr.Textbox,
"integer": gr.Textbox,
"number": gr.Textbox,
}
def tool_forward(*args, **kwargs):
return tool(*args, sanitize_inputs_outputs=True, **kwargs)
tool_forward.__signature__ = inspect.signature(tool.forward)
gradio_inputs = []
for input_name, input_details in tool.inputs.items():
input_gradio_component_class = TYPE_TO_COMPONENT_CLASS_MAPPING[input_details["type"]]
new_component = input_gradio_component_class(label=input_name)
gradio_inputs.append(new_component)
output_gradio_componentclass = TYPE_TO_COMPONENT_CLASS_MAPPING[tool.output_type]
gradio_output = output_gradio_componentclass(label="Output")
gr.Interface(
fn=tool_forward,
inputs=gradio_inputs,
outputs=gradio_output,
title=tool.name,
article=tool.description,
description=tool.description,
api_name=tool.name,
).launch()
def load_tool(
task_or_repo_id,
model_repo_id: Optional[str] = None,
token: Optional[str] = None,
trust_remote_code: bool = False,
**kwargs,
):
"""
Main function to quickly load a tool from the Hub.
<Tip warning={true}>
Loading a tool means that you'll download the tool and execute it locally.
ALWAYS inspect the tool you're downloading before loading it within your runtime, as you would do when
installing a package using pip/npm/apt.
</Tip>
Args:
task_or_repo_id (`str`):
The task for which to load the tool or a repo ID of a tool on the Hub. Tasks implemented in Transformers
are:
- `"document_question_answering"`
- `"image_question_answering"`
- `"speech_to_text"`
- `"text_to_speech"`
- `"translation"`
model_repo_id (`str`, *optional*):
Use this argument to use a different model than the default one for the tool you selected.
token (`str`, *optional*):
The token to identify you on hf.co. If unset, will use the token generated when running `huggingface-cli
login` (stored in `~/.huggingface`).
trust_remote_code (`bool`, *optional*, defaults to False):
This needs to be accepted in order to load a tool from Hub.
kwargs (additional keyword arguments, *optional*):
Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as
`cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the others
will be passed along to its init.
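    Example (illustrative; the repo id is a placeholder):
    ```py
    >>> from smolagents import load_tool
    >>> my_tool = load_tool("your-username/my-tool", trust_remote_code=True)
    ```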
"""
return Tool.from_hub(
task_or_repo_id,
model_repo_id=model_repo_id,
token=token,
trust_remote_code=trust_remote_code,
**kwargs,
)
def add_description(description):
"""
A decorator that adds a description to a function.
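    Example (illustrative):
    ```py
    >>> @add_description("Greets the user by name.")
    ... def greet(name):
    ...     return f"Hello, {name}!"
    >>> greet.description
    'Greets the user by name.'
    ```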
"""
def inner(func):
func.description = description
func.name = func.__name__
return func
return inner
class ToolCollection:
"""
Tool collections enable loading a collection of tools in the agent's toolbox.
Collections can be loaded from a collection in the Hub or from an MCP server, see:
- [`ToolCollection.from_hub`]
- [`ToolCollection.from_mcp`]
For example and usage, see: [`ToolCollection.from_hub`] and [`ToolCollection.from_mcp`]
"""
def __init__(self, tools: List[Tool]):
self.tools = tools
@classmethod
def from_hub(
cls,
collection_slug: str,
token: Optional[str] = None,
trust_remote_code: bool = False,
) -> "ToolCollection":
"""Loads a tool collection from the Hub.
        It adds a collection of tools from all Spaces in the collection to the agent's toolbox.
> [!NOTE]
> Only Spaces will be fetched, so you can feel free to add models and datasets to your collection if you'd
> like for this collection to showcase them.
Args:
collection_slug (str): The collection slug referencing the collection.
token (str, *optional*): The authentication token if the collection is private.
trust_remote_code (bool, *optional*, defaults to False): Whether to trust the remote code.
Returns:
ToolCollection: A tool collection instance loaded with the tools.
Example:
```py
>>> from smolagents import ToolCollection, CodeAgent
>>> image_tool_collection = ToolCollection.from_hub("huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f")
>>> agent = CodeAgent(tools=[*image_tool_collection.tools], add_base_tools=True)
>>> agent.run("Please draw me a picture of rivers and lakes.")
```
"""
_collection = get_collection(collection_slug, token=token)
_hub_repo_ids = {item.item_id for item in _collection.items if item.item_type == "space"}
tools = {Tool.from_hub(repo_id, token, trust_remote_code) for repo_id in _hub_repo_ids}
return cls(tools)
@classmethod
@contextmanager
def from_mcp(cls, server_parameters) -> "ToolCollection":
"""Automatically load a tool collection from an MCP server.
Note: a separate thread will be spawned to run an asyncio event loop handling
the MCP server.
Args:
server_parameters (mcp.StdioServerParameters): The server parameters to use to
connect to the MCP server.
Returns:
ToolCollection: A tool collection instance.
Example:
```py
>>> from smolagents import ToolCollection, CodeAgent
>>> from mcp import StdioServerParameters
>>> server_parameters = StdioServerParameters(
>>> command="uv",
>>> args=["--quiet", "[email protected]"],
>>> env={"UV_PYTHON": "3.12", **os.environ},
>>> )
>>> with ToolCollection.from_mcp(server_parameters) as tool_collection:
>>> agent = CodeAgent(tools=[*tool_collection.tools], add_base_tools=True)
>>> agent.run("Please find a remedy for hangover.")
```
"""
try:
from mcpadapt.core import MCPAdapt
from mcpadapt.smolagents_adapter import SmolAgentsAdapter
except ImportError:
raise ImportError(
"""Please install 'mcp' extra to use ToolCollection.from_mcp: `pip install "smolagents[mcp]"`."""
)
with MCPAdapt(server_parameters, SmolAgentsAdapter()) as tools:
yield cls(tools)
def tool(tool_function: Callable) -> Tool:
"""
Converts a function into an instance of a Tool subclass.
Args:
tool_function: Your function. Should have type hints for each input and a type hint for the output.
Should also have a docstring description including an 'Args:' part where each argument is described.
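    Example (an illustrative sketch; `to_uppercase` is not a built-in tool):
    ```py
    >>> from smolagents import tool
    >>> @tool
    ... def to_uppercase(text: str) -> str:
    ...     '''Converts the given text to uppercase.
    ...     Args:
    ...         text: The text to convert.
    ...     '''
    ...     return text.upper()
    >>> to_uppercase("hello")
    'HELLO'
    ```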
"""
tool_json_schema = get_json_schema(tool_function)["function"]
if "return" not in tool_json_schema:
raise TypeHintParsingException("Tool return type not found: make sure your function has a return type hint!")
class SimpleTool(Tool):
def __init__(
self,
name: str,
description: str,
inputs: Dict[str, Dict[str, str]],
output_type: str,
function: Callable,
):
self.name = name
self.description = description
self.inputs = inputs
self.output_type = output_type
self.forward = function
self.is_initialized = True
simple_tool = SimpleTool(
name=tool_json_schema["name"],
description=tool_json_schema["description"],
inputs=tool_json_schema["parameters"]["properties"],
output_type=tool_json_schema["return"]["type"],
function=tool_function,
)
original_signature = inspect.signature(tool_function)
new_parameters = [inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)] + list(
original_signature.parameters.values()
)
new_signature = original_signature.replace(parameters=new_parameters)
simple_tool.forward.__signature__ = new_signature
return simple_tool
class PipelineTool(Tool):
"""
A [`Tool`] tailored towards Transformer models. On top of the class attributes of the base class [`Tool`], you will
need to specify:
- **model_class** (`type`) -- The class to use to load the model in this tool.
- **default_checkpoint** (`str`) -- The default checkpoint that should be used when the user doesn't specify one.
- **pre_processor_class** (`type`, *optional*, defaults to [`transformers.AutoProcessor`]) -- The class to use to load the
pre-processor
- **post_processor_class** (`type`, *optional*, defaults to [`transformers.AutoProcessor`]) -- The class to use to load the
post-processor (when different from the pre-processor).
Args:
model (`str` or [`transformers.PreTrainedModel`], *optional*):
The name of the checkpoint to use for the model, or the instantiated model. If unset, will default to the
value of the class attribute `default_checkpoint`.
pre_processor (`str` or `Any`, *optional*):
The name of the checkpoint to use for the pre-processor, or the instantiated pre-processor (can be a
tokenizer, an image processor, a feature extractor or a processor). Will default to the value of `model` if
unset.
post_processor (`str` or `Any`, *optional*):
            The name of the checkpoint to use for the post-processor, or the instantiated post-processor (can be a
tokenizer, an image processor, a feature extractor or a processor). Will default to the `pre_processor` if
unset.
device (`int`, `str` or `torch.device`, *optional*):
The device on which to execute the model. Will default to any accelerator available (GPU, MPS etc...), the
CPU otherwise.
device_map (`str` or `dict`, *optional*):
If passed along, will be used to instantiate the model.
model_kwargs (`dict`, *optional*):
Any keyword argument to send to the model instantiation.
token (`str`, *optional*):
The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when
running `huggingface-cli login` (stored in `~/.huggingface`).
hub_kwargs (additional keyword arguments, *optional*):
Any additional keyword argument to send to the methods that will load the data from the Hub.
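    Example (an illustrative sketch of a subclass; the checkpoint and the `encode`/`forward`/`decode` overrides are
    assumptions to adapt to your own model):
    ```py
    >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    >>> class EnglishToFrenchTool(PipelineTool):
    ...     default_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
    ...     model_class = AutoModelForSeq2SeqLM
    ...     pre_processor_class = AutoTokenizer
    ...     name = "english_to_french"
    ...     description = "Translates English text to French."
    ...     inputs = {"text": {"type": "string", "description": "The English text to translate."}}
    ...     output_type = "string"
    ...     def encode(self, text):
    ...         return self.pre_processor(text, return_tensors="pt")
    ...     def forward(self, inputs):
    ...         return self.model.generate(**inputs)
    ...     def decode(self, outputs):
    ...         return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
    ```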
"""
pre_processor_class = None
model_class = None
post_processor_class = None
default_checkpoint = None
description = "This is a pipeline tool"
name = "pipeline"
inputs = {"prompt": str}
output_type = str
skip_forward_signature_validation = True
def __init__(
self,
model=None,
pre_processor=None,
post_processor=None,
device=None,
device_map=None,
model_kwargs=None,
token=None,
**hub_kwargs,
):
if not is_torch_available() or not _is_package_available("accelerate"):
raise ModuleNotFoundError(
"Please install 'transformers' extra to use a PipelineTool: `pip install 'smolagents[transformers]'`"
)
if model is None:
if self.default_checkpoint is None:
raise ValueError("This tool does not implement a default checkpoint, you need to pass one.")
model = self.default_checkpoint
if pre_processor is None:
pre_processor = model
self.model = model
self.pre_processor = pre_processor
self.post_processor = post_processor
self.device = device
self.device_map = device_map
self.model_kwargs = {} if model_kwargs is None else model_kwargs
if device_map is not None:
self.model_kwargs["device_map"] = device_map
self.hub_kwargs = hub_kwargs
self.hub_kwargs["token"] = token
super().__init__()
def setup(self):
"""
Instantiates the `pre_processor`, `model` and `post_processor` if necessary.
"""
if isinstance(self.pre_processor, str):
if self.pre_processor_class is None:
from transformers import AutoProcessor
self.pre_processor_class = AutoProcessor
self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs)
if isinstance(self.model, str):
self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs)
if self.post_processor is None:
self.post_processor = self.pre_processor
elif isinstance(self.post_processor, str):
if self.post_processor_class is None:
from transformers import AutoProcessor
self.post_processor_class = AutoProcessor
self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs)
if self.device is None:
if self.device_map is not None:
self.device = list(self.model.hf_device_map.values())[0]
else:
from accelerate import PartialState
self.device = PartialState().default_device
if self.device_map is None:
self.model.to(self.device)
super().setup()
def encode(self, raw_inputs):
"""
Uses the `pre_processor` to prepare the inputs for the `model`.
"""
return self.pre_processor(raw_inputs)
def forward(self, inputs):
"""
Sends the inputs through the `model`.
"""
import torch
with torch.no_grad():
return self.model(**inputs)
def decode(self, outputs):
"""
Uses the `post_processor` to decode the model output.
"""
return self.post_processor(outputs)
def __call__(self, *args, **kwargs):
import torch
from accelerate.utils import send_to_device
args, kwargs = handle_agent_input_types(*args, **kwargs)
if not self.is_initialized:
self.setup()
encoded_inputs = self.encode(*args, **kwargs)
tensor_inputs = {k: v for k, v in encoded_inputs.items() if isinstance(v, torch.Tensor)}
non_tensor_inputs = {k: v for k, v in encoded_inputs.items() if not isinstance(v, torch.Tensor)}
encoded_inputs = send_to_device(tensor_inputs, self.device)
outputs = self.forward({**encoded_inputs, **non_tensor_inputs})
outputs = send_to_device(outputs, "cpu")
decoded_outputs = self.decode(outputs)
return handle_agent_output_types(decoded_outputs, self.output_type)
__all__ = [
"AUTHORIZED_TYPES",
"Tool",
"tool",
"load_tool",
"launch_gradio_demo",
"ToolCollection",
]
| smolagents/src/smolagents/tools.py/0 | {
"file_path": "smolagents/src/smolagents/tools.py",
"repo_id": "smolagents",
"token_count": 18127
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from smolagents import (
AgentError,
AgentImage,
CodeAgent,
ToolCallingAgent,
stream_to_gradio,
)
from smolagents.models import (
ChatMessage,
ChatMessageToolCall,
ChatMessageToolCallDefinition,
)
from smolagents.monitoring import AgentLogger, LogLevel
class FakeLLMModel:
def __init__(self):
self.last_input_token_count = 10
self.last_output_token_count = 20
def __call__(self, prompt, tools_to_call_from=None, **kwargs):
if tools_to_call_from is not None:
return ChatMessage(
role="assistant",
content="",
tool_calls=[
ChatMessageToolCall(
id="fake_id",
type="function",
function=ChatMessageToolCallDefinition(name="final_answer", arguments={"answer": "image"}),
)
],
)
else:
return ChatMessage(
role="assistant",
content="""
Code:
```py
final_answer('This is the final answer.')
```""",
)
class MonitoringTester(unittest.TestCase):
def test_code_agent_metrics(self):
agent = CodeAgent(
tools=[],
model=FakeLLMModel(),
max_steps=1,
)
agent.run("Fake task")
self.assertEqual(agent.monitor.total_input_token_count, 10)
self.assertEqual(agent.monitor.total_output_token_count, 20)
def test_toolcalling_agent_metrics(self):
agent = ToolCallingAgent(
tools=[],
model=FakeLLMModel(),
max_steps=1,
)
agent.run("Fake task")
self.assertEqual(agent.monitor.total_input_token_count, 10)
self.assertEqual(agent.monitor.total_output_token_count, 20)
def test_code_agent_metrics_max_steps(self):
class FakeLLMModelMalformedAnswer:
def __init__(self):
self.last_input_token_count = 10
self.last_output_token_count = 20
def __call__(self, prompt, **kwargs):
return ChatMessage(role="assistant", content="Malformed answer")
agent = CodeAgent(
tools=[],
model=FakeLLMModelMalformedAnswer(),
max_steps=1,
)
agent.run("Fake task")
self.assertEqual(agent.monitor.total_input_token_count, 20)
self.assertEqual(agent.monitor.total_output_token_count, 40)
def test_code_agent_metrics_generation_error(self):
class FakeLLMModelGenerationException:
def __init__(self):
self.last_input_token_count = 10
self.last_output_token_count = 20
def __call__(self, prompt, **kwargs):
self.last_input_token_count = 10
self.last_output_token_count = 0
raise Exception("Cannot generate")
agent = CodeAgent(
tools=[],
model=FakeLLMModelGenerationException(),
max_steps=1,
)
agent.run("Fake task")
self.assertEqual(agent.monitor.total_input_token_count, 20) # Should have done two monitoring callbacks
self.assertEqual(agent.monitor.total_output_token_count, 0)
def test_streaming_agent_text_output(self):
agent = CodeAgent(
tools=[],
model=FakeLLMModel(),
max_steps=1,
)
# Use stream_to_gradio to capture the output
outputs = list(stream_to_gradio(agent, task="Test task"))
self.assertEqual(len(outputs), 7)
final_message = outputs[-1]
self.assertEqual(final_message.role, "assistant")
self.assertIn("This is the final answer.", final_message.content)
def test_streaming_agent_image_output(self):
agent = ToolCallingAgent(
tools=[],
model=FakeLLMModel(),
max_steps=1,
)
# Use stream_to_gradio to capture the output
outputs = list(
stream_to_gradio(
agent,
task="Test task",
additional_args=dict(image=AgentImage(value="path.png")),
)
)
self.assertEqual(len(outputs), 5)
final_message = outputs[-1]
self.assertEqual(final_message.role, "assistant")
self.assertIsInstance(final_message.content, dict)
self.assertEqual(final_message.content["path"], "path.png")
self.assertEqual(final_message.content["mime_type"], "image/png")
def test_streaming_with_agent_error(self):
logger = AgentLogger(level=LogLevel.INFO)
def dummy_model(prompt, **kwargs):
raise AgentError("Simulated agent error", logger)
agent = CodeAgent(
tools=[],
model=dummy_model,
max_steps=1,
)
# Use stream_to_gradio to capture the output
outputs = list(stream_to_gradio(agent, task="Test task"))
self.assertEqual(len(outputs), 9)
final_message = outputs[-1]
self.assertEqual(final_message.role, "assistant")
self.assertIn("Simulated agent error", final_message.content)
| smolagents/tests/test_monitoring.py/0 | {
"file_path": "smolagents/tests/test_monitoring.py",
"repo_id": "smolagents",
"token_count": 2645
} |
ARG cuda_arch_list="75-real;80-real;86-real;89-real;90-real;100-real;120-real"
ARG cuda_base=12.8.0
ARG build_type=release
ARG ompi_version=4.1.7
ARG sccache_gha_enabled=off
ARG actions_cache_url=""
ARG actions_runtime_token=""
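# Example build invocation (illustrative; the image tag is an assumption):
#   docker build -f Dockerfile_trtllm --build-arg build_type=release -t tgi-trtllm .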
# CUDA dependent dependencies resolver stage
FROM nvidia/cuda:${cuda_base}-cudnn-devel-ubuntu24.04 AS cuda-builder
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
build-essential \
cmake \
curl \
gcc-14 \
g++-14 \
git \
git-lfs \
lld \
libssl-dev \
libucx-dev \
libasan8 \
libubsan1 \
ninja-build \
pkg-config \
pipx \
python3 \
python3-dev \
python3-setuptools \
tar \
wget --no-install-recommends && \
pipx ensurepath
ENV TGI_INSTALL_PREFIX=/usr/local/tgi
ENV TENSORRT_INSTALL_PREFIX=/usr/local/tensorrt
# Install OpenMPI
FROM cuda-builder AS mpi-builder
WORKDIR /opt/src/mpi
ARG ompi_version
ENV OMPI_VERSION=${ompi_version}
ENV OMPI_TARBALL_FILENAME=openmpi-${OMPI_VERSION}.tar.bz2
ADD --checksum=sha256:54a33cb7ad81ff0976f15a6cc8003c3922f0f3d8ceed14e1813ef3603f22cd34 \
https://download.open-mpi.org/release/open-mpi/v4.1/${OMPI_TARBALL_FILENAME} .
RUN tar --strip-components=1 -xf ${OMPI_TARBALL_FILENAME} &&\
./configure --prefix=/usr/local/mpi --with-cuda=/usr/local/cuda --with-slurm && \
make -j all && \
make install && \
rm -rf ${OMPI_TARBALL_FILENAME}/..
# Install TensorRT
FROM cuda-builder AS trt-builder
COPY backends/trtllm/scripts/install_tensorrt.sh /opt/install_tensorrt.sh
RUN chmod +x /opt/install_tensorrt.sh && \
/opt/install_tensorrt.sh
# Build Backend
FROM cuda-builder AS tgi-builder
WORKDIR /usr/src/text-generation-inference
# Scoped global args reuse
ARG cuda_arch_list
ARG build_type
ARG sccache_gha_enabled
ARG actions_cache_url
ARG actions_runtime_token
# Install Rust
ENV PATH="/root/.cargo/bin:$PATH"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s -- -y && \
chmod -R a+w /root/.rustup && \
chmod -R a+w /root/.cargo && \
cargo install sccache --locked
ENV LD_LIBRARY_PATH="/usr/local/mpi/lib:$LD_LIBRARY_PATH"
ENV PKG_CONFIG_PATH="/usr/local/mpi/lib/pkgconfig"
ENV CMAKE_PREFIX_PATH="/usr/local/mpi:/usr/local/tensorrt"
ENV USE_LLD_LINKER=ON
ENV CUDA_ARCH_LIST=${cuda_arch_list}
# SCCACHE Specifics args - before finding a better, more generic, way...
ENV SCCACHE_GHA_ENABLED=${sccache_gha_enabled}
ENV ACTIONS_CACHE_URL=${actions_cache_url}
ENV ACTIONS_RUNTIME_TOKEN=${actions_runtime_token}
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY router router
COPY backends backends
COPY benchmark benchmark
COPY launcher launcher
COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt
COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi
ENV RUSTC_WRAPPER=sccache
ENV CMAKE_INSTALL_PREFIX=$TGI_INSTALL_PREFIX
RUN export CC=gcc-14 && \
    export CXX=g++-14 && \
export CMAKE_C_COMPILER_LAUNCHER=sccache && \
export CMAKE_CXX_COMPILER_LAUNCHER=sccache && \
export CMAKE_CUDA_COMPILER_LAUNCHER=sccache && \
mkdir $TGI_INSTALL_PREFIX && mkdir "$TGI_INSTALL_PREFIX/include" && mkdir "$TGI_INSTALL_PREFIX/lib" && \
cargo build --profile ${build_type} --package text-generation-backends-trtllm --bin text-generation-backends-trtllm && \
sccache --show-stats
FROM nvidia/cuda:${cuda_base}-cudnn-runtime-ubuntu24.04 AS runtime
RUN apt update && apt install -y libucx0 pipx python3-minimal python3-dev python3-pip python3-venv && \
rm -rf /var/lib/{apt,dpkg,cache,log}/ && \
pipx ensurepath && \
pipx install --include-deps transformers tokenizers
WORKDIR /usr/local/tgi/bin
ENV PATH=/root/.local/share/pipx/venvs/transformers/bin/:$PATH
ENV LD_LIBRARY_PATH="/usr/local/tgi/lib:/usr/local/mpi/lib:/usr/local/tensorrt/lib:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH"
ENV TOKENIZERS_PARALLELISM=false
ENV OMPI_MCA_plm_rsh_agent=""
COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi
COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt
COPY --from=tgi-builder /usr/local/tgi /usr/local/tgi
COPY --from=tgi-builder /usr/src/text-generation-inference/target/release/text-generation-backends-trtllm /usr/local/tgi/bin/text-generation-launcher
# This is used only for the CI/CD
FROM nvidia/cuda:${cuda_base}-cudnn-runtime-ubuntu24.04 AS ci-runtime
RUN apt update && apt install -y libasan8 libubsan1 libucx0 pipx python3-minimal python3-dev python3-pip python3-venv && \
rm -rf /var/lib/{apt,dpkg,cache,log}/ && \
pipx ensurepath && \
pipx install --include-deps transformers tokenizers
WORKDIR /usr/local/tgi/bin
ENV PATH=/root/.local/share/pipx/venvs/transformers/bin/:$PATH
ENV LD_LIBRARY_PATH="/usr/local/tgi/lib:/usr/local/mpi/lib:/usr/local/tensorrt/lib:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH"
ENV TOKENIZERS_PARALLELISM=false
ENV OMPI_MCA_plm_rsh_agent=""
COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi
COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt
COPY --from=tgi-builder /usr/local/tgi /usr/local/tgi
# Basically we copy from target/debug instead of target/release
COPY --from=tgi-builder /usr/src/text-generation-inference/target/debug/text-generation-backends-trtllm /usr/local/tgi/bin/text-generation-launcher
# This is the final image
FROM runtime
LABEL co.huggingface.vendor="Hugging Face Inc."
LABEL org.opencontainers.image.authors="[email protected]"
LABEL org.opencontainers.image.title="Text-Generation-Inference TensorRT-LLM Backend"
ENTRYPOINT ["./text-generation-launcher"]
CMD ["--executor-worker", "/usr/local/tgi/bin/executorWorker"]
| text-generation-inference/Dockerfile_trtllm/0 | {
"file_path": "text-generation-inference/Dockerfile_trtllm",
"repo_id": "text-generation-inference",
"token_count": 2414
} |
/// Multi shard Client
use crate::{v3, Health, ShardInfo};
use crate::{ClientError, Result};
use crate::v3::{Chunk, InfoResponse, Input};
use async_trait::async_trait;
use futures::future::join_all;
use tonic::transport::Uri;
use tracing::instrument;
use v3::client::{DecodeTimings, PrefillTimings};
use v3::{
Batch, CachedBatch, Client, Generation, GrammarType, HealthResponse,
NextTokenChooserParameters, Request, StoppingCriteriaParameters,
};
#[derive(Debug, Clone)]
/// Text Generation Inference gRPC multi client
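///
/// # Example (illustrative sketch; the unix socket path is an assumption)
/// ```ignore
/// let mut client = ShardedClient::connect_uds("/tmp/text-generation-server-0".to_string()).await?;
/// let info = client.info().await?;
/// ```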
pub struct ShardedClient {
clients: Vec<Client>,
}
impl ShardedClient {
fn new(clients: Vec<Client>) -> Self {
Self { clients }
}
/// Create a new ShardedClient from a master client. The master client will communicate with
/// the other shards and returns all uris/unix sockets with the `service_discovery` gRPC method.
async fn from_master_client(mut master_client: Client) -> Result<Self> {
// Get all uris/unix sockets from the master client
let uris = master_client.service_discovery().await?;
let futures = uris.into_iter().map(Client::connect_uds);
let clients: Result<Vec<Client>> = join_all(futures).await.into_iter().collect();
Ok(Self::new(clients?))
}
/// Returns a client connected to the given uri
pub async fn connect(uri: Uri) -> Result<Self> {
let master_client = Client::connect(uri).await?;
Self::from_master_client(master_client).await
}
/// Returns a client connected to the given unix socket
pub async fn connect_uds(path: String) -> Result<Self> {
let master_client = Client::connect_uds(path).await?;
Self::from_master_client(master_client).await
}
/// Get the model info
#[instrument(skip(self))]
pub async fn info(&mut self) -> Result<ShardInfo> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.info())
.collect();
join_all(futures).await.pop().unwrap().map(ShardInfo::from)
}
/// GRPC health check
#[instrument(skip(self))]
pub async fn health(&mut self) -> Result<HealthResponse> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.health())
.collect();
join_all(futures).await.pop().unwrap()
}
/// Clear the past generations cache
#[instrument(skip(self))]
pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.clear_cache(batch_id))
.collect();
join_all(futures).await.into_iter().collect()
}
/// Filter a cached batch
#[instrument(skip(self))]
pub async fn filter_batch(
&mut self,
batch_id: u64,
request_ids: Vec<u64>,
) -> Result<Option<CachedBatch>> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.filter_batch(batch_id, request_ids.clone())))
.collect();
// all shards return the same message
join_all(futures).await.pop().unwrap()
}
/// Warmup on a max size batch
///
/// Returns the maximum amount of tokens supported by the hardware
#[instrument(skip(self))]
pub async fn warmup(
&mut self,
max_input_length: Option<u32>,
max_prefill_tokens: u32,
max_total_tokens: Option<u32>,
max_batch_size: Option<usize>,
) -> Result<(Option<u32>, u32, u32)> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| {
Box::pin(client.warmup(
max_input_length,
max_prefill_tokens,
max_total_tokens,
max_batch_size,
))
})
.collect();
        // Collect the warmup results from all shards
let results = join_all(futures)
.await
.into_iter()
.collect::<Result<Vec<(Option<u32>, u32, u32)>>>()?;
// Take the minimum value
        // Different shards hold different parts of the vocab and might
        // yield a different available block size.
let min = results
.iter()
.min()
.expect("Expect at least 1 warmup result");
Ok(*min)
}
/// Generate one token for each request in the given batch
///
/// Returns Generation for each request in batch
/// and the next cached batch
#[instrument(skip_all, fields(id = & batch.id, size = & batch.size))]
pub async fn prefill(
&mut self,
batch: Batch,
cached_batch: Option<CachedBatch>,
) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.prefill(batch.clone(), cached_batch.clone())))
.collect();
#[allow(clippy::type_complexity)]
let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)>> =
join_all(futures).await.into_iter().collect();
let mut results = results?;
let (mut generations, next_batch, mut timings) =
results.pop().ok_or(ClientError::EmptyResults)?;
// Merge generations from different model shards
for (mut shard_generations, _, shard_timings) in results.into_iter() {
generations.append(&mut shard_generations);
// Return the timings of the slowest shard
if shard_timings.total > timings.total {
timings = shard_timings;
}
}
Ok((generations, next_batch, timings))
}
/// Generate one token for each request in the given cached batches
///
/// Returns Generation for each request in batches
/// and the next cached batch
#[instrument(skip_all, fields(size = batches.iter().map(| batch | {batch.size}).sum::< u32 > ()))]
pub async fn decode(
&mut self,
batches: Vec<CachedBatch>,
) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.decode(batches.clone())))
.collect();
#[allow(clippy::type_complexity)]
let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)>> =
join_all(futures).await.into_iter().collect();
let mut results = results?;
let (mut generations, next_batch, mut timings) =
results.pop().ok_or(ClientError::EmptyResults)?;
// Merge generations from different model shards
for (mut shard_generations, _, shard_timings) in results.into_iter() {
generations.append(&mut shard_generations);
// Return the timings of the slowest shard
if shard_timings.total > timings.total {
timings = shard_timings;
}
}
Ok((generations, next_batch, timings))
}
}
impl From<InfoResponse> for ShardInfo {
fn from(value: InfoResponse) -> Self {
Self {
requires_padding: value.requires_padding,
dtype: value.dtype,
device_type: value.device_type,
window_size: value.window_size,
speculate: value.speculate,
}
}
}
#[async_trait]
impl Health for ShardedClient {
async fn device_health(&self) -> Result<()> {
self.clone().health().await?;
Ok(())
}
async fn model_health(&self) -> Result<()> {
// Dummy batch of 1 token and 1 generated token
let liveness_request = Request {
id: u64::MAX,
inputs: "liveness".to_string(),
input_chunks: Some(Input {
chunks: vec![Chunk::Text("liveness".into()).into()],
}),
truncate: 10,
add_special_tokens: true,
prefill_logprobs: false,
parameters: Some(NextTokenChooserParameters {
temperature: 1.0,
top_k: 0,
top_p: 1.0,
typical_p: 1.0,
do_sample: false,
seed: 0,
repetition_penalty: 1.0,
frequency_penalty: 0.0,
watermark: false,
grammar: String::new(),
grammar_type: GrammarType::None as i32,
}),
stopping_parameters: Some(StoppingCriteriaParameters {
max_new_tokens: 1,
stop_sequences: vec![],
ignore_eos_token: false,
}),
top_n_tokens: 0,
// Block 0 is reserved for health checks
blocks: vec![0],
slots: (0..16).collect(),
cache_len: 0,
chunk_len: None,
adapter_id: None,
};
let batch = Batch {
id: u64::MAX,
requests: vec![liveness_request],
size: 1,
max_tokens: 2,
max_blocks: 1,
};
self.clone().prefill(batch, None).await?;
Ok(())
}
}
| text-generation-inference/backends/client/src/v3/sharded_client.rs/0 | {
"file_path": "text-generation-inference/backends/client/src/v3/sharded_client.rs",
"repo_id": "text-generation-inference",
"token_count": 4351
} |
from argparse import ArgumentParser
AWS_S3_CACHING_VARIABLES = {
"AWS_ACCESS_KEY_ID": "aws_access_key_id",
"AWS_SECRET_ACCESS_KEY": "aws_secret_access_key",
"AWS_SESSION_TOKEN": "aws_session_token",
"SCCACHE_REGION": "s3_region",
"SCCACHE_BUCKET": "s3_bucket_name",
}
ALL_CACHING_STORAGE_VARIABLES = {"AWS_S3_CACHING_VARIABLES"}
def setup_sccache_locally():
from os import environ
print("Setting up Local Caching Layer")
for target in ALL_CACHING_STORAGE_VARIABLES:
for envvar in globals()[target].keys():
if envvar in environ:
print(f"Deleted {envvar} from environment variables")
del environ[envvar]
def setup_sccache_for_s3():
from os import environ
print("Setting up AWS S3 Caching Layer")
for envvar in AWS_S3_CACHING_VARIABLES.keys():
if envvar not in environ or not environ[envvar] or len(environ[envvar]) == 0:
print(f"Missing definition for environment variable {envvar}")
if __name__ == "__main__":
parser = ArgumentParser("TensorRT-LLM Build Caching Setup")
parser.add_argument(
"--is-gha-build",
type=str,
default="FALSE",
help="Indicate if the build is from Github Actions",
)
# Parse args
args = parser.parse_args()
args.is_gha_build = args.is_gha_build.lower() in {"on", "true", "1"}
if args.is_gha_build:
setup_sccache_for_s3()
else:
setup_sccache_locally()
| text-generation-inference/backends/trtllm/scripts/setup_sccache.py/0 | {
"file_path": "text-generation-inference/backends/trtllm/scripts/setup_sccache.py",
"repo_id": "text-generation-inference",
"token_count": 663
} |
use crate::client::{
Batch, GrammarType, NextTokenChooserParameters, Request, StoppingCriteriaParameters,
};
use nohash_hasher::{BuildNoHashHasher, IntMap};
use std::cmp::min;
use std::collections::VecDeque;
use text_generation_router::infer::InferError;
use text_generation_router::infer::InferStreamResponse;
use text_generation_router::validation::{
ChunksToString, ValidGenerateRequest, ValidGrammar, ValidParameters, ValidStoppingParameters,
};
use tokio::sync::{mpsc, oneshot};
use tokio::time::Instant;
use tracing::{info_span, instrument, Span};
/// Queue entry
#[derive(Debug)]
pub(crate) struct Entry {
/// Request
pub request: ValidGenerateRequest,
/// Response sender to communicate between the Infer struct and the batching_task
pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>,
/// Span that will live as long as entry
pub span: Span,
/// Temporary span used as a guard when logging inference, wait times...
pub temp_span: Option<Span>,
/// Instant when this entry was queued
pub queue_time: Instant,
/// Instant when this entry was added to a batch
pub batch_time: Option<Instant>,
}
/// Request Queue
#[derive(Debug, Clone)]
pub(crate) struct Queue {
/// Channel to communicate with the background queue task
queue_sender: mpsc::UnboundedSender<QueueCommand>,
}
impl Queue {
pub(crate) fn new(
requires_padding: bool,
block_size: u32,
window_size: Option<u32>,
speculate: u32,
) -> Self {
// Create channel
let (queue_sender, queue_receiver) = mpsc::unbounded_channel();
// Launch background queue task
tokio::spawn(queue_task(
requires_padding,
block_size,
window_size,
speculate,
queue_receiver,
));
Self { queue_sender }
}
#[instrument(skip_all)]
pub(crate) fn append(&self, entry: Entry) {
// Send append command to the background task managing the state
// Unwrap is safe here
self.queue_sender
.send(QueueCommand::Append(Box::new(entry), Span::current()))
.unwrap();
}
// Get the next batch
#[instrument(skip(self))]
pub(crate) async fn next_batch(
&self,
min_size: Option<usize>,
max_size: Option<usize>,
prefill_token_budget: u32,
token_budget: u32,
) -> Option<NextBatch> {
// Create response channel
let (response_sender, response_receiver) = oneshot::channel();
// Send next batch command to the background task managing the state
// Unwrap is safe here
self.queue_sender
.send(QueueCommand::NextBatch {
min_size,
max_size,
prefill_token_budget,
token_budget,
response_sender,
span: Span::current(),
})
.unwrap();
// Await on response channel
// Unwrap is safe here
response_receiver.await.unwrap()
}
}
// Background task responsible of the queue state
async fn queue_task(
requires_padding: bool,
block_size: u32,
window_size: Option<u32>,
speculate: u32,
mut receiver: mpsc::UnboundedReceiver<QueueCommand>,
) {
let mut state = State::new(requires_padding, block_size, window_size, speculate);
while let Some(cmd) = receiver.recv().await {
match cmd {
QueueCommand::Append(entry, span) => {
span.in_scope(|| state.append(*entry));
metrics::gauge!("tgi_queue_size").increment(1.0);
}
QueueCommand::NextBatch {
min_size,
max_size,
prefill_token_budget,
token_budget,
response_sender,
span,
} => span.in_scope(|| {
let next_batch =
state.next_batch(min_size, max_size, prefill_token_budget, token_budget);
response_sender.send(next_batch).unwrap();
metrics::gauge!("tgi_queue_size").set(state.entries.len() as f64);
}),
}
}
}
/// Queue State
#[derive(Debug)]
struct State {
    /// Queue entries organized in a VecDeque
entries: VecDeque<(u64, Entry)>,
/// Id of the next entry
next_id: u64,
/// Id of the next batch
next_batch_id: u64,
/// Whether the model is using padding
requires_padding: bool,
/// Paged Attention block size
block_size: u32,
/// Sliding window
window_size: Option<u32>,
/// Speculation amount
speculate: u32,
}
impl State {
fn new(
requires_padding: bool,
block_size: u32,
window_size: Option<u32>,
speculate: u32,
) -> Self {
Self {
entries: VecDeque::with_capacity(128),
next_id: 0,
next_batch_id: 0,
requires_padding,
block_size,
window_size,
speculate,
}
}
/// Append an entry to the queue
fn append(&mut self, mut entry: Entry) {
// Create a span that will live as long as the entry is in the queue waiting to be batched
let queue_span = info_span!(parent: &entry.span, "queued");
entry.temp_span = Some(queue_span);
// Push entry in the queue
self.entries.push_back((self.next_id, entry));
self.next_id += 1;
}
// Get the next batch
fn next_batch(
&mut self,
min_size: Option<usize>,
max_size: Option<usize>,
prefill_token_budget: u32,
token_budget: u32,
) -> Option<NextBatch> {
if self.entries.is_empty() {
tracing::debug!("No queue");
return None;
}
// Check if we have enough entries
if let Some(min_size) = min_size {
if self.entries.len() < min_size {
tracing::debug!("Not enough entries");
return None;
}
}
if let Some(max_size) = max_size {
if max_size == 0 {
tracing::debug!("No capacity");
return None;
}
}
// Pad prefill_token_budget to be a multiple of block size
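        // e.g. with block_size = 16, a prefill_token_budget of 100 is rounded up to 112 (7 blocks)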
let prefill_token_budget = prefill_token_budget.div_ceil(self.block_size) * self.block_size;
// Create span for this batch to add context to inference calls
let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty);
next_batch_span.follows_from(Span::current());
let mut batch_requests = Vec::with_capacity(self.entries.len());
let mut batch_entries =
IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default());
let mut max_input_length = 0;
let mut prefill_tokens: u32 = 0;
let mut decode_tokens: u32 = 0;
// Pop entries starting from the front of the queue
while let Some((id, mut entry)) = self.entries.pop_front() {
// Filter entries where the response receiver was dropped (== entries where the request
// was dropped by the client)
if entry.response_tx.is_closed() {
metrics::counter!("tgi_request_failure", "err" => "dropped").increment(1);
tracing::debug!("Dropping entry");
continue;
}
if self.requires_padding {
// We pad to max input length in the Python shards
                // We need to take these padding tokens into account
max_input_length = max_input_length.max(entry.request.input_length);
prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length
} else {
// pad to block size
prefill_tokens +=
entry.request.input_length.div_ceil(self.block_size) * self.block_size;
}
if self.requires_padding {
decode_tokens += entry.request.stopping_parameters.max_new_tokens;
} else {
let max_new_tokens = match self.window_size {
None => entry.request.stopping_parameters.max_new_tokens,
Some(window_size) => min(
window_size.saturating_sub(entry.request.input_length),
entry.request.stopping_parameters.max_new_tokens,
),
};
// pad to block size
decode_tokens += max_new_tokens.div_ceil(self.block_size) * self.block_size;
}
if prefill_tokens > prefill_token_budget
|| (prefill_tokens + decode_tokens + self.speculate) > token_budget
{
// Entry is over budget
// Add it back to the front
tracing::debug!("Over budget: prefill_tokens={prefill_tokens} > {prefill_token_budget} || {prefill_tokens} + {decode_tokens} + {} > {token_budget}", self.speculate);
self.entries.push_front((id, entry));
break;
}
tracing::debug!("Accepting entry");
// Create a new span to link the batch back to this entry
let entry_batch_span = info_span!(parent: &entry.span, "infer");
// Add relationships
next_batch_span.follows_from(&entry_batch_span);
entry_batch_span.follows_from(&next_batch_span);
// Update entry
entry.temp_span = Some(entry_batch_span);
batch_requests.push(Request {
id,
prefill_logprobs: entry.request.decoder_input_details,
inputs: entry.request.inputs.chunks_to_string(),
truncate: entry.request.truncate,
parameters: Some(NextTokenChooserParameters::from(
entry.request.parameters.clone(),
)),
stopping_parameters: Some(StoppingCriteriaParameters::from(
entry.request.stopping_parameters.clone(),
)),
top_n_tokens: entry.request.top_n_tokens,
});
// Set batch_time
entry.batch_time = Some(Instant::now());
// Insert in batch_entries IntMap
batch_entries.insert(id, entry);
// Check if we reached max_size
if Some(batch_requests.len()) == max_size {
break;
}
}
// Empty batch
if batch_requests.is_empty() {
tracing::debug!("Filtered out all entries");
return None;
}
// Check if our batch is big enough
if let Some(min_size) = min_size {
// Batch is too small
if batch_requests.len() < min_size {
// Add back entries to the queue in the correct order
for r in batch_requests.into_iter().rev() {
let id = r.id;
let entry = batch_entries.remove(&id).unwrap();
self.entries.push_front((id, entry));
}
return None;
}
}
// Final batch size
let size = batch_requests.len() as u32;
next_batch_span.record("batch_size", size);
let batch = Batch {
id: self.next_batch_id,
requests: batch_requests,
size,
max_tokens: (prefill_tokens + decode_tokens),
};
// Increment batch id
self.next_batch_id += 1;
metrics::histogram!("tgi_batch_next_size").record(batch.size as f64);
Some((batch_entries, batch, next_batch_span))
}
}
type NextBatch = (IntMap<u64, Entry>, Batch, Span);
#[derive(Debug)]
enum QueueCommand {
Append(Box<Entry>, Span),
NextBatch {
min_size: Option<usize>,
max_size: Option<usize>,
prefill_token_budget: u32,
token_budget: u32,
response_sender: oneshot::Sender<Option<NextBatch>>,
span: Span,
},
}
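// These commands are sent from the `Queue` handle to the background task that owns the
// `State` above; `NextBatch` carries a oneshot sender so the caller can await the batching
// result (see the queue task defined earlier in this file).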
impl From<ValidParameters> for NextTokenChooserParameters {
fn from(value: ValidParameters) -> Self {
let (grammar, grammar_type) = match value.grammar {
None => (String::new(), GrammarType::None),
Some(grammar) => match grammar {
ValidGrammar::Json(grammar_string) => (grammar_string, GrammarType::Json),
ValidGrammar::Regex(grammar_string) => (grammar_string, GrammarType::Regex),
},
};
Self {
temperature: value.temperature,
top_k: value.top_k,
top_p: value.top_p,
typical_p: value.typical_p,
do_sample: value.do_sample,
seed: value.seed,
repetition_penalty: value.repetition_penalty,
frequency_penalty: value.frequency_penalty,
watermark: value.watermark,
grammar,
grammar_type: grammar_type.into(),
}
}
}
impl From<ValidStoppingParameters> for StoppingCriteriaParameters {
fn from(value: ValidStoppingParameters) -> Self {
Self {
max_new_tokens: value.max_new_tokens,
stop_sequences: value.stop_sequences,
ignore_eos_token: value.ignore_eos_token,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use tracing::info_span;
fn default_entry() -> (
Entry,
mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>,
) {
let (response_tx, receiver_tx) = mpsc::unbounded_channel();
let entry = Entry {
request: ValidGenerateRequest {
inputs: vec![],
input_ids: Some(Arc::new(vec![])),
input_length: 0,
add_special_tokens: true,
truncate: 0,
decoder_input_details: false,
parameters: ValidParameters {
temperature: 0.0,
top_k: 0,
top_p: 0.0,
typical_p: 0.0,
do_sample: false,
seed: 0,
repetition_penalty: 0.0,
frequency_penalty: 0.0,
watermark: false,
grammar: None,
},
stopping_parameters: ValidStoppingParameters {
ignore_eos_token: false,
max_new_tokens: 1,
max_total_new_tokens: 1024,
stop_sequences: vec![],
},
top_n_tokens: 0,
adapter_id: None,
},
response_tx,
span: info_span!("entry"),
temp_span: None,
queue_time: Instant::now(),
batch_time: None,
};
(entry, receiver_tx)
}
#[test]
fn test_append() {
let mut state = State::new(false, 1, None, 0);
let (entry, _guard) = default_entry();
assert_eq!(state.next_id, 0);
assert_eq!(state.entries.len(), 0);
state.append(entry);
assert_eq!(state.next_id, 1);
assert_eq!(state.entries.len(), 1);
let (id, _) = state.entries.remove(0).unwrap();
assert_eq!(id, 0);
}
#[test]
fn test_next_batch_empty() {
let mut state = State::new(false, 1, None, 0);
assert!(state.next_batch(None, None, 1, 1).is_none());
assert!(state.next_batch(Some(1), None, 1, 1).is_none());
}
#[test]
fn test_next_batch_min_size() {
let mut state = State::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
state.append(entry1);
state.append(entry2);
let (entries, batch, _) = state.next_batch(None, None, 2, 2).unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&0));
assert!(entries.contains_key(&1));
assert!(entries.get(&0).unwrap().batch_time.is_some());
assert!(entries.get(&1).unwrap().batch_time.is_some());
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 2);
assert_eq!(state.next_id, 2);
assert_eq!(state.entries.len(), 0);
assert_eq!(state.next_batch_id, 1);
let (entry3, _guard3) = default_entry();
state.append(entry3);
assert!(state.next_batch(Some(2), None, 2, 2).is_none());
assert_eq!(state.next_id, 3);
assert_eq!(state.entries.len(), 1);
let (id, _) = state.entries.remove(0).unwrap();
assert_eq!(id, 2);
}
#[test]
fn test_next_batch_max_size() {
let mut state = State::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
state.append(entry1);
state.append(entry2);
let (entries, batch, _) = state.next_batch(None, Some(1), 2, 2).unwrap();
assert_eq!(entries.len(), 1);
assert!(entries.contains_key(&0));
assert!(entries.get(&0).unwrap().batch_time.is_some());
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 1);
assert_eq!(state.next_id, 2);
assert_eq!(state.entries.len(), 1);
assert_eq!(state.next_batch_id, 1);
}
#[test]
fn test_next_batch_token_budget() {
let mut state = State::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
state.append(entry1);
state.append(entry2);
let (entries, batch, _) = state.next_batch(None, None, 1, 1).unwrap();
assert_eq!(entries.len(), 1);
assert!(entries.contains_key(&0));
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 1);
assert_eq!(state.next_id, 2);
assert_eq!(state.entries.len(), 1);
assert_eq!(state.next_batch_id, 1);
let (entry3, _guard3) = default_entry();
state.append(entry3);
let (entries, batch, _) = state.next_batch(None, None, 3, 3).unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&1));
assert!(entries.contains_key(&2));
assert_eq!(batch.id, 1);
assert_eq!(batch.size, 2);
assert_eq!(state.next_id, 3);
assert_eq!(state.entries.len(), 0);
assert_eq!(state.next_batch_id, 2);
}
#[tokio::test]
async fn test_queue_append() {
let queue = Queue::new(false, 1, None, 0);
let (entry, _guard) = default_entry();
queue.append(entry);
}
#[tokio::test]
async fn test_queue_next_batch_empty() {
let queue = Queue::new(false, 1, None, 0);
assert!(queue.next_batch(None, None, 1, 1).await.is_none());
assert!(queue.next_batch(Some(1), None, 1, 1).await.is_none());
}
#[tokio::test]
async fn test_queue_next_batch_min_size() {
let queue = Queue::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
queue.append(entry1);
queue.append(entry2);
let (entries, batch, _) = queue.next_batch(None, None, 2, 2).await.unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&0));
assert!(entries.contains_key(&1));
assert!(entries.get(&0).unwrap().batch_time.is_some());
assert!(entries.get(&1).unwrap().batch_time.is_some());
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 2);
let (entry3, _guard3) = default_entry();
queue.append(entry3);
// Not enough requests pending
assert!(queue.next_batch(Some(2), None, 2, 2).await.is_none());
// Not enough token budget
assert!(queue.next_batch(Some(1), None, 0, 0).await.is_none());
// Ok
let (entries2, batch2, _) = queue.next_batch(Some(1), None, 2, 2).await.unwrap();
assert_eq!(entries2.len(), 1);
assert!(entries2.contains_key(&2));
assert!(entries2.get(&2).unwrap().batch_time.is_some());
assert_eq!(batch2.id, 1);
assert_eq!(batch2.size, 1);
}
#[tokio::test]
async fn test_queue_next_batch_max_size() {
let queue = Queue::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
queue.append(entry1);
queue.append(entry2);
let (entries, batch, _) = queue.next_batch(None, Some(1), 2, 2).await.unwrap();
assert_eq!(entries.len(), 1);
assert!(entries.contains_key(&0));
assert!(entries.get(&0).unwrap().batch_time.is_some());
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 1);
}
#[tokio::test]
async fn test_queue_next_batch_token_budget() {
let queue = Queue::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
queue.append(entry1);
queue.append(entry2);
let (entries, batch, _) = queue.next_batch(None, None, 1, 1).await.unwrap();
assert_eq!(entries.len(), 1);
assert!(entries.contains_key(&0));
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 1);
let (entry3, _guard3) = default_entry();
queue.append(entry3);
let (entries, batch, _) = queue.next_batch(None, None, 3, 3).await.unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&1));
assert!(entries.contains_key(&2));
assert_eq!(batch.id, 1);
assert_eq!(batch.size, 2);
}
#[tokio::test]
async fn test_queue_next_batch_token_speculate() {
let queue = Queue::new(false, 1, None, 2);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
queue.append(entry1);
queue.append(entry2);
// Budget of 1 is not enough
assert!(queue.next_batch(None, None, 1, 1).await.is_none());
let (entries, batch, _) = queue.next_batch(None, None, 6, 6).await.unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&0));
assert!(entries.contains_key(&1));
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 2);
}
#[tokio::test]
async fn test_queue_next_batch_dropped_receiver() {
let queue = Queue::new(false, 1, None, 0);
let (entry, _) = default_entry();
queue.append(entry);
assert!(queue.next_batch(None, None, 1, 1).await.is_none());
}
}
| text-generation-inference/backends/v2/src/queue.rs/0 | {
"file_path": "text-generation-inference/backends/v2/src/queue.rs",
"repo_id": "text-generation-inference",
"token_count": 11054
} |
/// Inspired by https://github.com/orhun/rust-tui-template/blob/472aa515119d4c94903eac12d9784417281dc7f5/src/event.rs
use ratatui::crossterm::event;
use std::time::{Duration, Instant};
use tokio::sync::{broadcast, mpsc};
/// Events
#[derive(Debug)]
pub(crate) enum Event {
/// Terminal tick.
Tick,
/// Key press.
Key(event::KeyEvent),
/// Terminal resize.
Resize,
}
pub(crate) async fn terminal_event_task(
fps: u32,
event_sender: mpsc::Sender<Event>,
mut shutdown_receiver: broadcast::Receiver<()>,
_shutdown_guard_sender: mpsc::Sender<()>,
) {
// End task if a message is received on shutdown_receiver
// _shutdown_guard_sender will be dropped once the task is finished
tokio::select! {
_ = event_loop(fps, event_sender) => {
},
_ = shutdown_receiver.recv() => {}
}
}
/// Main event loop
async fn event_loop(fps: u32, event_sender: mpsc::Sender<Event>) {
// Frame budget
let per_frame = Duration::from_secs(1) / fps;
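// e.g. with a hypothetical fps of 30, the per-frame budget is 1s / 30 ≈ 33.3ms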
// When was last frame executed
let mut last_frame = Instant::now();
loop {
// Sleep to avoid blocking the thread for too long
if let Some(sleep) = per_frame.checked_sub(last_frame.elapsed()) {
tokio::time::sleep(sleep).await;
}
// Get crossterm event and send a new one over the channel
if event::poll(Duration::from_secs(0)).expect("no events available") {
match event::read().expect("unable to read event") {
event::Event::Key(e) => event_sender.send(Event::Key(e)).await.unwrap_or(()),
event::Event::Resize(_w, _h) => {
event_sender.send(Event::Resize).await.unwrap_or(())
}
_ => (),
}
}
// Frame budget exceeded
if last_frame.elapsed() >= per_frame {
// Send tick
event_sender.send(Event::Tick).await.unwrap_or(());
// Reset last_frame time
last_frame = Instant::now();
}
}
}
| text-generation-inference/benchmark/src/event.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/event.rs",
"repo_id": "text-generation-inference",
"token_count": 917
} |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.7.0"
DEPRECATION_WARNING = (
"`text_generation` clients are deprecated and will be removed in the near future. "
"Please use the `InferenceClient` from the `huggingface_hub` package instead."
)
from text_generation.client import Client, AsyncClient  # noqa: E402
from text_generation.inference_api import (  # noqa: E402
InferenceAPIClient,
InferenceAPIAsyncClient,
)
__all__ = [
"Client",
"AsyncClient",
"InferenceAPIClient",
"InferenceAPIAsyncClient",
]
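# Minimal usage sketch (illustrative only: the URL and prompt below are placeholders, and
# these clients are deprecated in favour of `huggingface_hub.InferenceClient`):
#
#     from text_generation import Client
#
#     client = Client("http://127.0.0.1:8080")
#     response = client.generate("What is Deep Learning?", max_new_tokens=17)
#     print(response.generated_text)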
| text-generation-inference/clients/python/text_generation/__init__.py/0 | {
"file_path": "text-generation-inference/clients/python/text_generation/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 338
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 323,
"logprob": -1.1171875,
"special": false,
"text": " and"
},
{
"id": 1268,
"logprob": -0.9477539,
"special": false,
"text": " how"
},
{
"id": 1587,
"logprob": -0.51464844,
"special": false,
"text": " does"
},
{
"id": 433,
"logprob": -0.043182373,
"special": false,
"text": " it"
},
{
"id": 1782,
"logprob": -1.0810547,
"special": false,
"text": " differ"
},
{
"id": 505,
"logprob": -0.005054474,
"special": false,
"text": " from"
},
{
"id": 8776,
"logprob": -0.47485352,
"special": false,
"text": " traditional"
},
{
"id": 5780,
"logprob": -0.15112305,
"special": false,
"text": " machine"
},
{
"id": 6975,
"logprob": -0.0011291504,
"special": false,
"text": " learning"
},
{
"id": 5380,
"logprob": -0.31323242,
"special": false,
"text": "?\n"
}
],
"top_tokens": null
},
"generated_text": " and how does it differ from traditional machine learning?\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int/test_compressed_tensors_w8a8_int.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int/test_compressed_tensors_w8a8_int.json",
"repo_id": "text-generation-inference",
"token_count": 873
} |
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": " the royal mouse? It is a little more slender and only weighs around 1.5 pounds for males and 1.3 pounds",
"role": "assistant"
}
}
],
"created": 1732541190,
"id": "",
"model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
"object": "chat.completion",
"system_fingerprint": "2.4.1-dev0-native",
"usage": {
"completion_tokens": 30,
"prompt_tokens": 73,
"total_tokens": 103
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt_continue.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt_continue.json",
"repo_id": "text-generation-inference",
"token_count": 260
} |
[
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Jeff Walker's Product Launch Formula is a comprehensive system",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 69,
"total_tokens": 79
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here are three key indicators to determine if a customer",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 52,
"total_tokens": 62
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "You can use the `String.format()` method in",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 97,
"total_tokens": 107
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "In a realm of binary mysticism, we find",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 126,
"total_tokens": 136
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The `dummy` variable is being used to consume",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 305,
"total_tokens": 315
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "You can add multiple new columns in Power Query (",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 51,
"total_tokens": 61
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "There are many exciting new technologies emerging across various fields",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 52,
"total_tokens": 62
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Poly Ether Ether Ketone (PEEK) is",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 40,
"total_tokens": 50
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's a technical overview of a referral system similar",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 85,
"total_tokens": 95
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's an example of how you can add an",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 45,
"total_tokens": 55
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'd be happy to help with Java. What",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 43,
"total_tokens": 53
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I can help you plan a road trip from Pune",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 82,
"total_tokens": 92
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'd be happy to explain more about a topic",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 38,
"total_tokens": 48
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'd be happy to help you brainstorm and provide",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 47,
"total_tokens": 57
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Implementing a Minesweeper algorithm using algebraic",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 54,
"total_tokens": 64
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "There are several issues with the provided code:\n\n1",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 375,
"total_tokens": 385
}
},
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": ";)",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085330,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 2,
"prompt_tokens": 105,
"total_tokens": 107
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "As I delved into the world of high-st",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 2097,
"total_tokens": 2107
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "/u/CruxHub: Hi, I'm",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 2614,
"total_tokens": 2624
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To simulate a conversation between Alice and /u/C",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1070,
"total_tokens": 1080
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Alice: Hey /u/CruxHub,",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1847,
"total_tokens": 1857
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Alice: Hi /u/CruxHub,",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1849,
"total_tokens": 1859
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "/u/CruxHub: Hey Alice, I",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1004,
"total_tokens": 1014
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "/u/CruxHub: Hey Alice, I",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1100,
"total_tokens": 1110
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "/u/CruxHub: Hey Alice, I",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1044,
"total_tokens": 1054
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The Dogme approach and the Lexical Approach are",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 54,
"total_tokens": 64
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Implementing a netfilter in Linux with a Rust",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 48,
"total_tokens": 58
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Damage to the Ulnar nerve can cause numb",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 56,
"total_tokens": 66
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The Space Shuttle's Reaction Control System (RCS",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 50,
"total_tokens": 60
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I can provide you with a basic Python script that",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 65,
"total_tokens": 75
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Farming meat has several negative impacts on the environment",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 43,
"total_tokens": 53
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The photograph filter you're referring to is called \"",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 51,
"total_tokens": 61
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's a sample geological database structure with some example",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 59,
"total_tokens": 69
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Web Marketing: A Simplified Explanation**\n\nWeb",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 45,
"total_tokens": 55
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's a rewritten and improved version of the story",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 447,
"total_tokens": 457
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here are the questions rewritten in a more conversational",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 168,
"total_tokens": 178
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Learning Progress: 0%**\n\n| Topic",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 216,
"total_tokens": 226
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I couldn't find any information on a person named",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 44,
"total_tokens": 54
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's a list of the largest outdoor retailers in",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 43,
"total_tokens": 53
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To create a WordPress shortcode that includes Facebook SDK code",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 49,
"total_tokens": 59
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The sentence is mostly grammatically correct, but there",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 78,
"total_tokens": 88
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'd be happy to engage in a debate with",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 59,
"total_tokens": 69
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'd love to hear about your business. As",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 64,
"total_tokens": 74
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'll wait for your request to proceed with part",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 2410,
"total_tokens": 2420
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The final part of the Day Sculpting program emphasizes",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 2699,
"total_tokens": 2709
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Analysis of the Coming of Age Story Archetype",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 349,
"total_tokens": 359
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The Apostle John is one of the most prominent figures",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 49,
"total_tokens": 59
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To build a Google Places autocomplete feature on Jetpack",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 427,
"total_tokens": 437
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The information provided does not mention the captain's name",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 169,
"total_tokens": 179
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The metaverse is a shared, immersive and interactive",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 39,
"total_tokens": 49
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here are some ideas for a series of articles for",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 50,
"total_tokens": 60
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "\"Purim Palooza Alert: \n\nTo",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 78,
"total_tokens": 88
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Summary of the paper in 10 points:",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 2022,
"total_tokens": 2032
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "You'll provide three pieces of text, and then",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 58,
"total_tokens": 68
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'm ready to proceed with text 3.",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1650,
"total_tokens": 1660
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'm ready to answer questions on Text 1",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 1116,
"total_tokens": 1126
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "This is a Solidity contract written in the older",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 334,
"total_tokens": 344
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Speech Recognition and Synthesis using Python**\n\nTo",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 84,
"total_tokens": 94
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I'd be happy to help you discuss a paper",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 42,
"total_tokens": 52
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To handle the given utterance, we can use",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 375,
"total_tokens": 385
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Subscription Services Template:**\n\n**Title:** Virtual",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 443,
"total_tokens": 453
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Hello. How can I assist you today?",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 36,
"total_tokens": 46
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Differentiating yourself from other Etsy shops is crucial to",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 102,
"total_tokens": 112
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To become a Licensed Marriage and Family Therapist (",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 53,
"total_tokens": 63
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**What is Quantum Computing?**\n\nQuantum computing",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 42,
"total_tokens": 52
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Aquí te dejo 40 opciones de nombres",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 108,
"total_tokens": 118
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Deposition is a geological process that involves the transportation",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 38,
"total_tokens": 48
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here are some good e-governance initiatives in",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 55,
"total_tokens": 65
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's a simple Python program that accepts a command",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 56,
"total_tokens": 66
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Imagine you're playing with a toy box. You",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 47,
"total_tokens": 57
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's an example of a question they might ask",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 66,
"total_tokens": 76
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Arduino Uno adalah sebuah papan mikrokontrol",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 38,
"total_tokens": 48
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To edit an array that is within an object,",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 42,
"total_tokens": 52
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Microsoft ENTRA (Enterprise Mobility + Security) is",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 56,
"total_tokens": 66
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To calculate the difference in interest paid between a simple",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 69,
"total_tokens": 79
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Yes, you can use Spring State Machine and Spring",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 49,
"total_tokens": 59
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The issue lies in the fact that the `meta",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 142,
"total_tokens": 152
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here are some effective marketing tactics for local small businesses",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 46,
"total_tokens": 56
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The French Revolution, which lasted from 1789",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 41,
"total_tokens": 51
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Roles of a Network Driver:**\n\nA network",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 65,
"total_tokens": 75
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Yes, I'm familiar with the SAS (Stat",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 44,
"total_tokens": 54
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Using relays to control 12V solen",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 60,
"total_tokens": 70
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "You can use the following Python code to achieve this",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 55,
"total_tokens": 65
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here are some prompts for viral comics:\n\n1.",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 336,
"total_tokens": 346
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To simplify and make the comic funnier, consider",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 301,
"total_tokens": 311
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's a rewritten version of the 4-panel",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 282,
"total_tokens": 292
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Subject: Request for E-Waste Collection and Computer",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 110,
"total_tokens": 120
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "In the context of conference calls, the state you",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 84,
"total_tokens": 94
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "I can provide a general classification of companies based on",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 56,
"total_tokens": 66
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here are some user stories that describe the concept in",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 44,
"total_tokens": 54
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "You can check your Python version by running the following",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 39,
"total_tokens": 49
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "**Scenario:**\n\n15-year-old Black youth,",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 473,
"total_tokens": 483
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "As a Demand Generation Manager for a B2B",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 50,
"total_tokens": 60
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The error is due to a typo in your code",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085336,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 369,
"total_tokens": 379
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "고등교육의 필요성에 관한 영어 에",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 72,
"total_tokens": 82
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "Here's a simple C# program that uses the",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 51,
"total_tokens": 61
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "The error message \"connection refused\" indicates that the",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085331,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 85,
"total_tokens": 95
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "To load an image, you can use various methods",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1726085326,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "chat.completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 41,
"total_tokens": 51
}
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_prefix/test_flash_llama_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_prefix/test_flash_llama_load.json",
"repo_id": "text-generation-inference",
"token_count": 32395
} |
{
"details": {
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 100,
"logprob": -0.9824219,
"special": false,
"text": "_"
},
{
"id": 5879,
"logprob": -0.3017578,
"special": false,
"text": "world"
},
{
"id": 2284,
"logprob": -0.68652344,
"special": false,
"text": "():"
},
{
"id": 303,
"logprob": -0.27734375,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": -0.4482422,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": -0.54248047,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": -0.4296875,
"special": false,
"text": "Hello"
},
{
"id": 10914,
"logprob": -0.8544922,
"special": false,
"text": " World"
},
{
"id": 16013,
"logprob": -0.7573242,
"special": false,
"text": "!\")"
},
{
"id": 222,
"logprob": -0.81347656,
"special": false,
"text": "\n"
}
]
},
"generated_text": "_world():\n print(\"Hello World!\")\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2_lora/test_flash_starcoder2_with_hugcode_adapter.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2_lora/test_flash_starcoder2_with_hugcode_adapter.json",
"repo_id": "text-generation-inference",
"token_count": 852
} |
import pytest
@pytest.fixture(scope="module")
def flash_falcon_handle(launcher):
with launcher("tiiuae/falcon-7b", trust_remote_code=True) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_falcon(flash_falcon_handle):
await flash_falcon_handle.health(300)
return flash_falcon_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon(flash_falcon, response_snapshot):
response = await flash_falcon.generate(
"Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon_all_params(flash_falcon, response_snapshot):
response = await flash_falcon.generate(
"Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon_load(flash_falcon, generate_load, response_snapshot):
responses = await generate_load(
flash_falcon,
"Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_falcon.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_falcon.py",
"repo_id": "text-generation-inference",
"token_count": 908
} |
import pytest
@pytest.fixture(scope="module")
def flash_mistral_handle(launcher):
with launcher("mistralai/Mistral-7B-Instruct-v0.1") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_mistral(flash_mistral_handle):
await flash_mistral_handle.health(300)
return flash_mistral_handle.client
@pytest.mark.asyncio
async def test_flash_mistral(flash_mistral, response_snapshot):
response = await flash_mistral.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response.generated_text == ": Let n = 10 - 1"
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_mistral_all_params(flash_mistral, response_snapshot):
response = await flash_mistral.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_mistral_load(flash_mistral, generate_load, response_snapshot):
responses = await generate_load(
flash_mistral, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert responses[0].generated_text == ": Let n = 10 - 1"
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_mistral.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_mistral.py",
"repo_id": "text-generation-inference",
"token_count": 714
} |
import pytest
@pytest.fixture(scope="module")
def flash_starcoder_gptq_handle(launcher):
with launcher("Narsil/starcoder-gptq", num_shard=2, quantize="gptq") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_starcoder_gptq(flash_starcoder_gptq_handle):
await flash_starcoder_gptq_handle.health(300)
return flash_starcoder_gptq_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_starcoder_gptq(flash_starcoder_gptq, generous_response_snapshot):
response = await flash_starcoder_gptq.generate(
"def geometric_mean(L: List[float]):",
max_new_tokens=20,
decoder_input_details=True,
)
assert response.details.generated_tokens == 2
assert response == generous_response_snapshot
# Deactivated because it's flaky
# Only this model seems affected and it's only a logprob precision issue.
# @pytest.mark.release
# @pytest.mark.asyncio
# async def test_flash_starcoder_gptq_default_params(
# flash_starcoder_gptq, generous_response_snapshot
# ):
# response = await flash_starcoder_gptq.generate(
# "def geometric_mean(L: List[float]):",
# max_new_tokens=20,
# temperature=0.2,
# top_p=0.95,
# decoder_input_details=True,
# seed=0,
# )
# assert response.details.generated_tokens == 2
# assert response == generous_response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_starcoder_gptq_load(
flash_starcoder_gptq, generate_load, generous_response_snapshot
):
responses = await generate_load(
flash_starcoder_gptq,
"def geometric_mean(L: List[float]):",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
# XXX: TODO: Fix this test.
# assert all([r.generated_text == responses[0].generated_text for r in responses])
# assert responses == generous_response_snapshot
| text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py",
"repo_id": "text-generation-inference",
"token_count": 802
} |
import pytest
@pytest.fixture(scope="module")
def t5_sharded_handle(launcher):
with launcher("google/flan-t5-xxl", num_shard=4) as handle:
yield handle
@pytest.fixture(scope="module")
async def t5_sharded(t5_sharded_handle):
await t5_sharded_handle.health(300)
return t5_sharded_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_t5_sharded(t5_sharded, response_snapshot):
response = await t5_sharded.generate(
"Please answer the following question. What is the boiling point of Nitrogen?",
max_new_tokens=10,
decoder_input_details=True,
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_t5_sharded_load(t5_sharded, generate_load, response_snapshot):
responses = await generate_load(
t5_sharded,
"Please answer the following question. What is the boiling point of Nitrogen?",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_t5_sharded.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_t5_sharded.py",
"repo_id": "text-generation-inference",
"token_count": 443
} |
import datasets
import json
dataset = datasets.load_dataset("ccdv/govreport-summarization")
max_new_tokens = 50
conversations = []
for i, item in enumerate(dataset["test"]):
report = item["report"]
messages = [{"from": "human", "value": f"Summarize this report: ```{report}```"}]
conversations.append({"conversations": messages})
with open("long.json", "w") as f:
json.dump(conversations, f, indent=4)
| text-generation-inference/load_tests/long.py/0 | {
"file_path": "text-generation-inference/load_tests/long.py",
"repo_id": "text-generation-inference",
"token_count": 156
} |
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "model_type")]
#[serde(rename_all = "snake_case")]
pub struct LlavaNext {
pub(crate) text_config: TextConfig,
pub(crate) vision_config: VisionConfig,
pub(crate) image_grid_pinpoints: Vec<(usize, usize)>,
}
fn get_anyres_image_grid_shape(
height: usize,
width: usize,
grid_pinpoints: &[(usize, usize)],
patch_size: usize,
) -> (usize, usize) {
let (height, width) = select_best_resolution(height, width, grid_pinpoints);
(height / patch_size, width / patch_size)
}
/// Selects the best resolution from a list of possible resolutions based on the original size.
/// This is done by calculating the effective and wasted resolution for each possible resolution.
/// The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.
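/// As a concrete example of this implementation: for a 480-by-640 (height x width) image and the
/// grid pinpoints [(336, 672), (672, 336), (672, 672), (1008, 336), (336, 1008)] used in the
/// tests below, (672, 672) is selected, since it is the only candidate whose effective
/// resolution reaches the full 480 * 640 = 307200 pixels.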
fn select_best_resolution(
original_height: usize,
original_width: usize,
possible_resolutions: &[(usize, usize)],
) -> (usize, usize) {
let mut best_fit = None;
let mut max_effective_resolution = 0;
let mut min_wasted_resolution = f32::NEG_INFINITY;
for (height, width) in possible_resolutions {
let wscale = *width as f32 / original_width as f32;
let hscale = *height as f32 / original_height as f32;
// f32 partial ord.
let scale = if wscale > hscale { hscale } else { wscale };
let downscaled_width = (*width as f32 * scale) as usize;
let downscaled_height = (*height as f32 * scale) as usize;
let effective_resolution = std::cmp::min(
downscaled_width * downscaled_height,
original_width * original_height,
);
let wasted_resolution = (width * height) - effective_resolution;
if effective_resolution > max_effective_resolution
|| (effective_resolution == max_effective_resolution
&& (wasted_resolution as f32) < min_wasted_resolution)
{
max_effective_resolution = effective_resolution;
min_wasted_resolution = wasted_resolution as f32;
best_fit = Some((*height, *width));
}
}
best_fit.unwrap_or((original_height, original_width))
}
fn get_unpadded_features(
height: usize,
width: usize,
npatches: usize,
num_patch_height: usize,
num_patch_width: usize,
) -> (usize, usize) {
let current_height = npatches * num_patch_height;
let current_width = npatches * num_patch_width;
let aspect_ratio: f64 = width as f64 / height as f64;
let current_aspect_ratio: f64 = current_width as f64 / current_height as f64;
let (current_height, current_width) = if aspect_ratio > current_aspect_ratio {
let new_height = (height * current_width) / width;
let padding = (current_height - new_height) / 2;
(current_height - (2 * padding), current_width)
} else {
let new_width = (width * current_height) / height;
let padding = (current_width - new_width) / 2;
(current_height, current_width - (2 * padding))
};
let unpadded_features = current_height * current_width;
let newline_features = current_height;
(unpadded_features, newline_features)
}
impl LlavaNext {
pub fn get_number_of_features(&self, height: usize, width: usize) -> usize {
let image_size = self.vision_config.image_size;
let patch_size = self.vision_config.patch_size;
assert!(image_size % patch_size == 0);
let npatches = image_size / patch_size;
// Dimensions are intentionally swapped to be bug-compatible with
// upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59
let (num_patch_width, num_patch_height) =
get_anyres_image_grid_shape(height, width, &self.image_grid_pinpoints, image_size);
let (unpadded_features, newline_features) =
get_unpadded_features(height, width, npatches, num_patch_height, num_patch_width);
// The base patch covers the entire image
let base_features = npatches.pow(2);
unpadded_features + newline_features + base_features
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct ClipVisionModel {
image_size: usize,
patch_size: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Idefics3 {}
impl Idefics3 {
pub fn get_max_longest_edge(&self) -> usize {
364
}
pub fn get_number_of_features(&self) -> usize {
169
}
pub fn get_max_longest_edge_for_image_resize(&self) -> usize {
1456
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Idefics2 {}
impl Idefics2 {
pub fn get_number_of_features(&self, _height: usize, _width: usize) -> usize {
64
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct PaliTextConfig {
pub(crate) num_image_tokens: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Paligemma {
pub(crate) text_config: PaliTextConfig,
}
impl Paligemma {
pub fn get_number_of_features(&self, _height: usize, _width: usize) -> usize {
self.text_config.num_image_tokens
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Qwen2VlVisionConfig {
pub(crate) depth: usize,
pub(crate) embed_dim: usize,
pub(crate) mlp_ratio: usize,
pub(crate) num_heads: usize,
pub(crate) in_chans: usize,
pub(crate) hidden_size: usize,
pub(crate) patch_size: usize,
pub(crate) spatial_merge_size: usize,
pub(crate) spatial_patch_size: usize,
pub(crate) temporal_patch_size: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Qwen2Vl {
pub(crate) vision_config: Qwen2VlVisionConfig,
}
impl Qwen2Vl {
pub fn get_number_of_features(&self, height: usize, width: usize) -> usize {
let num_pixels = height * width;
num_pixels / self.vision_config.patch_size.pow(2)
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "model_type")]
#[serde(rename_all = "snake_case")]
pub enum Config {
Qwen2Vl(Qwen2Vl),
LlavaNext(LlavaNext),
ClipVisionModel(ClipVisionModel),
Mistral,
Mamba,
Idefics,
Mllama,
Idefics2(Idefics2),
Idefics3(Idefics3),
Ssm,
GptBigcode,
Granite,
Santacoder,
Bloom,
Mpt,
Gpt2,
Gptj,
GptNeox,
Phi,
#[serde(rename = "phi-msft")]
PhiMsft,
Phi3,
Phimoe,
Llama,
Baichuan,
Paligemma(Paligemma),
Gemma,
Gemma2,
Cohere,
Drbx,
Falcon,
Mixtral,
Starcoder2,
Qwen2,
Opt,
T5,
DeepseekV2,
DeepseekV3,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct TextConfig {}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct VisionConfig {
pub(crate) image_size: usize,
pub(crate) patch_size: usize,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_llava_next_features() {
let config = LlavaNext {
text_config: TextConfig {},
vision_config: VisionConfig {
image_size: 336,
patch_size: 14,
},
image_grid_pinpoints: vec![
(336, 672),
(672, 336),
(672, 672),
(1008, 336),
(336, 1008),
],
};
let slots = config.get_number_of_features(20, 20);
assert_eq!(slots, 1176);
let slots = config.get_number_of_features(640, 640);
assert_eq!(slots, 2928);
let slots = config.get_number_of_features(480, 640);
assert_eq!(slots, 2340);
let slots = config.get_number_of_features(899, 1024);
assert_eq!(slots, 2634);
let slots = config.get_number_of_features(1024, 899);
assert_eq!(slots, 2640);
let slots = config.get_number_of_features(1067, 1600);
assert_eq!(slots, 2144);
}
}
| text-generation-inference/router/src/config.rs/0 | {
"file_path": "text-generation-inference/router/src/config.rs",
"repo_id": "text-generation-inference",
"token_count": 3641
} |
# Fork that adds only the correct stream to this kernel in order
# to make cuda graphs work.
awq_commit := bd1dc2d5254345cc76ab71894651fb821275bdd4
awq:
rm -rf llm-awq
git clone https://github.com/huggingface/llm-awq
build-awq: awq
cd llm-awq/ && git fetch && git checkout $(awq_commit)
cd llm-awq/awq/kernels && python setup.py build
install-awq: build-awq
pip uninstall awq_inference_engine -y || true
cd llm-awq/awq/kernels && python setup.py install
| text-generation-inference/server/Makefile-awq/0 | {
"file_path": "text-generation-inference/server/Makefile-awq",
"repo_id": "text-generation-inference",
"token_count": 183
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _cuda_buffers_cuh
#define _cuda_buffers_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
const int CUDA_MAX_DEVICES = 16;
// #ifndef _cuda_buffers_cu
// extern __constant__ half2 q4_table[16][256];
// #endif
class CudaBuffers
{
public:
int device;
half* temp_state; // [max_hidden_rows * intermediate_size]
half* temp_dq; // size of largest quant tensor * 8
cudaStream_t alt_stream_1;
cudaStream_t alt_stream_2;
cudaStream_t alt_stream_3;
cudaEvent_t alt_stream_1_done;
cudaEvent_t alt_stream_2_done;
cudaEvent_t alt_stream_3_done;
CudaBuffers
(
int _device,
half* _temp_state,
half* _temp_dq
);
~CudaBuffers();
};
CudaBuffers* get_buffers(const int device_index);
void prepare_buffers_cuda
(
int _device,
half* _temp_state,
half* _temp_dq
);
void cleanup_buffers_cuda();
#endif
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cuh",
"repo_id": "text-generation-inference",
"token_count": 471
} |
#ifndef _matrix_view_cuh
#define _matrix_view_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "quant/qdq_util.cuh"
class MatrixView_half
{
public:
const half* data;
const int height;
const int width;
__device__ __forceinline__ MatrixView_half(const half* data, const int height, const int width)
: data(data), height(height), width(width)
{ }
__device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
__device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
__device__ __forceinline__ half2 item_half2half2(int row, int column) const { return __half2half2(data[row * width + column]); }
__device__ __forceinline__ const half* item_ptr(int row, int column) const { return &data[row * width + column]; }
__device__ __forceinline__ void item4(half (&items)[4], int row, int column) const
{
half2* ptr = (half2*) item_ptr(row, column);
half2 i01 = ptr[0];
half2 i23 = ptr[1];
items[0] = __low2half(i01);
items[1] = __high2half(i01);
items[2] = __low2half(i23);
items[3] = __high2half(i23);
}
__device__ __forceinline__ void item4_f(float (&items)[4], int row, int column) const
{
half2* ptr = (half2*)item_ptr(row, column);
half2 i01 = ptr[0];
half2 i23 = ptr[1];
items[0] = __half2float(__low2half(i01));
items[1] = __half2float(__high2half(i01));
items[2] = __half2float(__low2half(i23));
items[3] = __half2float(__high2half(i23));
}
__device__ __forceinline__ void item4_h2(half2 (&items)[4], int row, int column) const
{
half2* ptr = (half2*)item_ptr(row, column);
half2 i01 = ptr[0];
half2 i23 = ptr[1];
items[0] = __half2half2(__low2half(i01));
items[1] = __half2half2(__high2half(i01));
items[2] = __half2half2(__low2half(i23));
items[3] = __half2half2(__high2half(i23));
}
};
class MatrixView_half_rw
{
public:
half* data;
const int height;
const int width;
__device__ __forceinline__ MatrixView_half_rw(half* data, const int height, const int width)
: data(data), height(height), width(width)
{ }
__device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
__device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
__device__ __forceinline__ half* item_ptr(int row, int column) { return &data[row * width + column]; }
__device__ __forceinline__ void set(int row, int column, half value) { data[row * width + column] = value; }
__device__ __forceinline__ void set_half2(int row, int column, half2 value) { ((half2*)data)[(row * width + column) / 2] = value; }
__device__ __forceinline__ void set4(int row, int column, half v0, half v1, half v2, half v3)
{
half2 v01 = __halves2half2(v0, v1);
half2 v23 = __halves2half2(v2, v3);
half2* ptr = (half2*) item_ptr(row, column);
ptr[0] = v01;
ptr[1] = v23;
}
};
class MatrixView_q4_row
{
public:
const uint32_t* data;
const int height;
const int width;
__device__ __forceinline__ MatrixView_q4_row(const uint32_t* data, const int height, const int width)
: data(data), height(height), width(width)
{ }
__device__ __forceinline__ int item(int row, int column) const
{
int shift = (column & 0x07) * 4;
return (data[row * width / 8 + column / 8] >> shift) & 0x0f;
}
__device__ __forceinline__ void item2(int (&items)[2], int row, int column) const
{
int shift = (column & 0x07) * 4;
uint32_t d = data[row * width / 8 + column / 8] >> shift;
items[0] = d & 0x0f;
items[1] = (d >> 4) & 0x0f;
}
__device__ __forceinline__ void item4(int (&items)[4], int row, int column) const
{
int shift = (column & 0x07) * 4;
uint32_t d = data[row * width / 8 + column / 8] >> shift;
items[0] = d & 0x0f;
items[1] = (d >> 4) & 0x0f;
items[2] = (d >> 8) & 0x0f;
items[3] = (d >> 12) & 0x0f;
}
};
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/matrix_view.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/matrix_view.cuh",
"repo_id": "text-generation-inference",
"token_count": 1862
} |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import torch
extra_cuda_cflags = ["-lineinfo", "-O3"]
extra_cflags = []
if torch.version.hip:
extra_cflags = ["-DLEGACY_HIPBLAS_DIRECT=ON"]
extra_cuda_cflags += ["-DHIPBLAS_USE_HIP_HALF", "-DLEGACY_HIPBLAS_DIRECT=ON"]
extra_compile_args = {
"cxx": extra_cflags,
"nvcc": extra_cuda_cflags,
}
setup(
name="exllamav2_kernels",
ext_modules=[
CUDAExtension(
name="exllamav2_kernels",
sources=[
"exllamav2_kernels/ext.cpp",
"exllamav2_kernels/cuda/q_matrix.cu",
"exllamav2_kernels/cuda/q_gemm.cu",
],
extra_compile_args=extra_compile_args,
)
],
cmdclass={"build_ext": BuildExtension},
)
| text-generation-inference/server/exllamav2_kernels/setup.py/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/setup.py",
"repo_id": "text-generation-inference",
"token_count": 424
} |
import torch
from text_generation_server.layers import (
TensorParallelEmbedding,
)
class ProcessGroup:
def __init__(self, rank: int, world_size: int):
self._rank = rank
self.world_size = world_size
def size(self) -> int:
return self.world_size
def rank(self) -> int:
return self._rank
class Weights:
def __init__(self, rank: int, world_size: int, vocab_size: int, hidden_dim: int):
self.weight = (
torch.arange(vocab_size * hidden_dim).float().view(vocab_size, hidden_dim)
)
self.process_group = ProcessGroup(rank, world_size)
def get_partial_sharded(self, name: str, dim: int):
assert dim == 0
rank = self.process_group.rank()
world_size = self.process_group.size()
size = self.weight.shape[dim]
block_size = (size + world_size - 1) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
return self.weight[start:stop]
def get_shape(self, name: str):
return self.weight.shape
def test_weight_hub_files_offline_error():
vocab_size = 17
weights = Weights(
rank=0,
world_size=1,
vocab_size=vocab_size,
hidden_dim=256,
)
embeddings = TensorParallelEmbedding("", weights)
input_ids = torch.arange(vocab_size)
output = embeddings.forward(input_ids)
assert embeddings.min_id == 0
assert embeddings.max_id == 17
torch.testing.assert_close(output, torch.arange(256 * 17).float().view(17, 256))
weights_0_2 = Weights(rank=0, world_size=2, vocab_size=vocab_size, hidden_dim=256)
weights_1_2 = Weights(rank=1, world_size=2, vocab_size=vocab_size, hidden_dim=256)
embeddings_0_2 = TensorParallelEmbedding("", weights_0_2, reduce=False)
assert embeddings_0_2.min_id == 0
assert embeddings_0_2.max_id == 9
torch.testing.assert_close(
embeddings_0_2.weight,
torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0)
.view(10, 256)
.float(),
)
embeddings_1_2 = TensorParallelEmbedding("", weights_1_2, reduce=False)
assert embeddings_1_2.min_id == 9
assert embeddings_1_2.max_id == 17
torch.testing.assert_close(
embeddings_1_2.weight,
torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0)
.view(9, 256)
.float(),
)
output_tp_0 = embeddings_0_2.forward(input_ids)
output_tp_1 = embeddings_1_2.forward(input_ids)
torch.testing.assert_close(output, output_tp_0 + output_tp_1)
| text-generation-inference/server/tests/utils/test_layers.py/0 | {
"file_path": "text-generation-inference/server/tests/utils/test_layers.py",
"repo_id": "text-generation-inference",
"token_count": 1146
} |
#!/usr/bin/env python
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention v2 algorithm from Tri Dao
(https://tridao.me/publications/flash2/flash2.pdf)
Credits: OpenAI kernel team, AMD ML Frameworks Triton team
Features supported:
1) Fwd with causal masking
2) Any sequence lengths without padding (currently fwd kernel only)
3) Support for different sequence lengths for q and k
4) Nested tensor API currently does not support dropout or bias.
Not currently supported:
1) Non power of two head dims
"""
import torch
import triton
import triton.language as tl
torch_dtype: tl.constexpr = torch.float16
@triton.jit
def cdiv_fn(x, y):
return (x + y - 1) // y
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def dropout_offsets(philox_seed, philox_offset, dropout_p, m, n, stride):
ms = tl.arange(0, m)
ns = tl.arange(0, n)
return philox_offset + ms[:, None] * stride + ns[None, :]
@triton.jit
def dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride):
rng_offsets = dropout_offsets(
philox_seed, philox_offset, dropout_p, m, n, stride
).to(tl.uint32)
# TODO: use tl.randint for better performance
return tl.rand(philox_seed, rng_offsets)
@triton.jit
def dropout_mask(philox_seed, philox_offset, dropout_p, m, n, stride):
rng_output = dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride)
rng_keep = rng_output > dropout_p
return rng_keep
@triton.jit
def load_fn(block_ptr, first, second, pad):
if first and second:
tensor = tl.load(block_ptr, boundary_check=(0, 1), padding_option=pad)
elif first:
tensor = tl.load(block_ptr, boundary_check=(0,), padding_option=pad)
elif second:
tensor = tl.load(block_ptr, boundary_check=(1,), padding_option=pad)
else:
tensor = tl.load(block_ptr)
return tensor
@triton.jit
def _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
actual_seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
block_min,
block_max,
offs_n_causal,
masked_blocks,
n_extra_tokens,
bias_ptr,
IS_CAUSAL: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
OFFS_M: tl.constexpr,
OFFS_N: tl.constexpr,
PRE_LOAD_V: tl.constexpr,
MASK_STEPS: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr,
PADDED_HEAD: tl.constexpr,
):
# loop over k, v, and update accumulator
for start_n in range(block_min, block_max, BLOCK_N):
# For padded blocks, we will overrun the tensor size if
# we load all BLOCK_N. For others, the blocks are all within range.
k = load_fn(
K_block_ptr,
PADDED_HEAD,
MASK_STEPS and (n_extra_tokens != 0),
"zero",
)
if PRE_LOAD_V:
v = load_fn(
V_block_ptr,
MASK_STEPS and (n_extra_tokens != 0),
PADDED_HEAD,
"zero",
)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
# We start from end of seqlen_k so only the first iteration would need
# to be checked for padding if it is not a multiple of block_n
# TODO: This can be optimized to only be true for the padded block.
if MASK_STEPS: # noqa: SIM102
            # If this is the last block / iteration, we want to mask if the
            # sequence length is not a multiple of the block size. A solution
            # is to always do BLOCK_M // BLOCK_N + 1 steps if not
            # is_modulo_mn; the last step might get wasted, but that is okay.
            # Check that this masking works for that case.
if (start_n + BLOCK_N == block_max) and (n_extra_tokens != 0):
boundary_m = tl.full([BLOCK_M], actual_seqlen_k, dtype=tl.int32)
size_n = start_n + OFFS_N[None, :]
mask = size_n < boundary_m[:, None]
qk = tl.where(mask, qk, float("-inf"))
if IS_CAUSAL:
causal_boundary = start_n + offs_n_causal
causal_mask = OFFS_M[:, None] >= causal_boundary[None, :]
qk = tl.where(causal_mask, qk, float("-inf"))
# -- compute qk ----
qk += tl.dot(q, k)
if bias_ptr is not None:
bias = load_fn(
bias_ptr, False, MASK_STEPS and (n_extra_tokens != 0), "zero"
)
# While bias is added after multiplying qk with sm_scale, our
# optimization to use 2^x instead of e^x results in an additional
# scale factor of log2(e) which we must also multiply the bias with.
qk += bias * 1.44269504089
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk = qk - m_ij[:, None]
p = tl.math.exp2(qk)
# CAVEAT: Must update l_ij before applying dropout
l_ij = tl.sum(p, 1)
if ENABLE_DROPOUT:
philox_offset = (
batch_philox_offset
+ start_m * BLOCK_M * actual_seqlen_k
+ start_n
- BLOCK_N
)
keep = dropout_mask(
philox_seed,
philox_offset,
dropout_p,
BLOCK_M,
BLOCK_N,
actual_seqlen_k,
)
if RETURN_ENCODED_SOFTMAX:
tl.store(
encoded_softmax_block_ptr,
tl.where(keep, p, -p).to(encoded_softmax_block_ptr.type.element_ty),
)
p = tl.where(keep, p, 0.0)
elif RETURN_ENCODED_SOFTMAX:
tl.store(
encoded_softmax_block_ptr,
p.to(encoded_softmax_block_ptr.type.element_ty),
)
# -- update output accumulator --
alpha = tl.math.exp2(m_i - m_ij)
acc = acc * alpha[:, None]
if not PRE_LOAD_V:
v = load_fn(
V_block_ptr,
MASK_STEPS and (n_extra_tokens != 0),
PADDED_HEAD,
"zero",
)
        # -- update l_i and m_i --
        l_i = l_i * alpha + l_ij
        m_i = m_ij
acc += tl.dot(p.to(V_block_ptr.type.element_ty), v)
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
if bias_ptr is not None:
bias_ptr = tl.advance(bias_ptr, (0, BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(
encoded_softmax_block_ptr, (0, BLOCK_N)
)
return acc, l_i, m_i
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_M": 256,
"BLOCK_N": 64,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 128,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 256,
"BLOCK_N": 128,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 3,
"PRE_LOAD_V": True,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 3,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 64,
"BLOCK_N": 64,
"waves_per_eu": 4,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 32,
"BLOCK_N": 32,
"waves_per_eu": 4,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
# TODO: This config fails with head_size not pow2 with data mismatches.
# triton.Config({'BLOCK_M': 32, 'BLOCK_N': 16, 'waves_per_eu': 1,
# 'PRE_LOAD_V': False}, num_stages=1, num_warps=4),
triton.Config(
{
"BLOCK_M": 16,
"BLOCK_N": 16,
"waves_per_eu": 1,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 1,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
],
key=["IS_CAUSAL", "dropout_p", "BLOCK_DMODEL"],
)
@triton.jit
def attn_fwd(
Q,
K,
V,
bias,
sm_scale,
L,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
stride_bz,
stride_bh,
stride_bm,
stride_bn,
cu_seqlens_q,
cu_seqlens_k,
dropout_p,
philox_seed,
philox_offset_base,
encoded_softmax,
HQ: tl.constexpr,
HK: tl.constexpr,
ACTUAL_BLOCK_DMODEL: tl.constexpr,
MAX_SEQLENS_Q: tl.constexpr,
MAX_SEQLENS_K: tl.constexpr,
VARLEN: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
PRE_LOAD_V: tl.constexpr,
BIAS_TYPE: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr,
):
start_m = tl.program_id(0)
off_h_q = tl.program_id(1)
off_z = tl.program_id(2)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
if VARLEN:
cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z)
cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1)
seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start
# We have a one-size-fits-all grid in id(0). Some seqlens might be too
# small for all start_m so for those we return early.
if start_m * BLOCK_M > seqlen_q:
return
cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z)
cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1)
seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start
else:
cu_seqlens_q_start = 0
cu_seqlens_k_start = 0
seqlen_q = MAX_SEQLENS_Q
seqlen_k = MAX_SEQLENS_K
# Now we compute whether we need to exit early due to causal masking.
# This is because for seqlen_q > seqlen_k, M rows of the attn scores
# are completely masked, resulting in 0s written to the output, and
# inf written to LSE. We don't need to do any GEMMs in this case.
# This block of code determines what N is, and if this WG is operating
# on those M rows.
n_blocks = cdiv_fn(seqlen_k, BLOCK_N)
if IS_CAUSAL:
# If seqlen_q == seqlen_k, the attn scores are a square matrix.
# If seqlen_q != seqlen_k, attn scores are rectangular which means
# the causal mask boundary is bottom right aligned, and ends at either
# the top edge (seqlen_q < seqlen_k) or left edge.
# This captures the decrease in n_blocks if we have a rectangular attn
# matrix
n_blocks_seqlen = cdiv_fn(
(start_m + 1) * BLOCK_M + seqlen_k - seqlen_q, BLOCK_N
)
# This is what adjusts the block_max for the current WG, only
# if IS_CAUSAL. Otherwise we want to always iterate through all n_blocks
n_blocks = min(n_blocks, n_blocks_seqlen)
# If we have no blocks after adjusting for seqlen deltas, this WG is
# part of the blocks that are all 0. We exit early.
if n_blocks <= 0:
o_offset = (
off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh
)
O_block_ptr = tl.make_block_ptr(
base=Out + o_offset,
shape=(seqlen_q, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=Out.type.element_ty)
# We still need to write 0s to the result
# tl.store(O_block_ptr,
# acc.to(Out.type.element_ty), boundary_check=(0,1))
# l_ptrs = L + off_z * hq * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q
# + offs_m
# We store inf to LSE, not -inf because in the bwd pass,
# we subtract this
# from qk which makes it -inf, such that exp(qk - inf) = 0
# for these masked blocks.
# l = tl.full([BLOCK_M], value=float("inf"), dtype=tl.float32)
# tl.store(l_ptrs, l)
# TODO: Should dropout and return encoded softmax be handled here?
return
# If MQA / GQA, set the K and V head offsets appropriately.
GROUP_SIZE: tl.constexpr = HQ // HK
if GROUP_SIZE != 1:
off_h_k = off_h_q // GROUP_SIZE
else:
off_h_k = off_h_q
n_extra_tokens = 0
if seqlen_k < BLOCK_N:
n_extra_tokens = BLOCK_N - seqlen_k
elif seqlen_k % BLOCK_N:
n_extra_tokens = seqlen_k % BLOCK_N
PADDED_HEAD: tl.constexpr = ACTUAL_BLOCK_DMODEL != BLOCK_DMODEL
# Compute pointers for all the tensors used in this kernel.
q_offset = off_z * stride_qz + off_h_q * stride_qh + cu_seqlens_q_start * stride_qm
Q_block_ptr = tl.make_block_ptr(
base=Q + q_offset,
shape=(seqlen_q, ACTUAL_BLOCK_DMODEL),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
k_offset = off_z * stride_kz + off_h_k * stride_kh + cu_seqlens_k_start * stride_kn
K_block_ptr = tl.make_block_ptr(
base=K + k_offset,
shape=(ACTUAL_BLOCK_DMODEL, seqlen_k),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1),
)
v_offset = off_z * stride_vz + off_h_k * stride_vh + cu_seqlens_k_start * stride_vk
V_block_ptr = tl.make_block_ptr(
base=V + v_offset,
shape=(seqlen_k, ACTUAL_BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0),
)
if BIAS_TYPE != 0:
bias_ptr = tl.make_block_ptr(
base=bias + off_h_q * stride_bh,
shape=(seqlen_q, seqlen_k),
strides=(stride_bm, stride_bn),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_N),
order=(1, 0),
)
else:
bias_ptr = None
if ENABLE_DROPOUT:
batch_philox_offset = (
philox_offset_base + (off_z * HQ + off_h_q) * seqlen_q * seqlen_k
)
else:
batch_philox_offset = 0
# We can ask to return the dropout mask without actually doing any dropout.
    # In this case, we return an invalid pointer to indicate that the mask is
    # not valid.
# TODO: Fix encoded softmax. It currently uses just h_q in the base offset.
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.make_block_ptr(
base=encoded_softmax + off_h_q * seqlen_q * seqlen_k,
shape=(seqlen_q, seqlen_k),
strides=(seqlen_k, 1),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_N),
order=(1, 0),
)
else:
encoded_softmax_block_ptr = 0
# initialize pointer to m and l
m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32)
l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# scale sm_scale by log_2(e) and use 2^x in the loop as we do not
# have native e^x support in HW.
qk_scale = sm_scale * 1.44269504089
# Q is loaded once at the beginning and shared by all N blocks.
q = load_fn(Q_block_ptr, True, PADDED_HEAD, "zero")
q = (q * qk_scale).to(Q_block_ptr.type.element_ty)
# Here we compute how many full and masked blocks we have.
padded_block_k = n_extra_tokens != 0
is_modulo_mn = not padded_block_k and (seqlen_q % BLOCK_M == 0)
if IS_CAUSAL:
# There are always at least BLOCK_M // BLOCK_N masked blocks.
# Additionally there might be one more due to dissimilar seqlens.
masked_blocks = BLOCK_M // BLOCK_N + (not is_modulo_mn)
else:
# Padding on Q does not need to be masked in the FA loop.
masked_blocks = padded_block_k
# if IS_CAUSAL, not is_modulo_mn does not always result in an additional
# block. In this case we might exceed n_blocks so pick the min.
masked_blocks = min(masked_blocks, n_blocks)
n_full_blocks = n_blocks - masked_blocks
block_min = 0
block_max = n_blocks * BLOCK_N
# Compute for full blocks. Here we set causal to false regardless of its
# value because there is no masking. Similarly we do not need padding.
if n_full_blocks > 0:
block_max = (n_blocks - masked_blocks) * BLOCK_N
acc, l_i, m_i = _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
# _, _, offs_n_causal, masked_blocks, n_extra_tokens, _
block_min,
block_max,
0,
0,
0,
bias_ptr,
# IS_CAUSAL, ....
False,
BLOCK_M,
BLOCK_DMODEL,
BLOCK_N,
offs_m,
offs_n,
# _, MASK_STEPS, ...
PRE_LOAD_V,
False,
ENABLE_DROPOUT,
RETURN_ENCODED_SOFTMAX,
PADDED_HEAD,
)
block_min = block_max
block_max = n_blocks * BLOCK_N
tl.debug_barrier()
    # The remaining blocks, if any, are the masked ones and need causal / padding handling.
if masked_blocks > 0:
offs_n_causal = offs_n + (seqlen_q - seqlen_k) if IS_CAUSAL else 0
K_block_ptr = tl.advance(K_block_ptr, (0, n_full_blocks * BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (n_full_blocks * BLOCK_N, 0))
if bias_ptr is not None:
bias_ptr = tl.advance(bias_ptr, (0, n_full_blocks * BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(
encoded_softmax_block_ptr, (0, n_full_blocks)
)
acc, l_i, m_i = _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
block_min,
block_max,
offs_n_causal,
masked_blocks,
n_extra_tokens,
bias_ptr,
IS_CAUSAL,
BLOCK_M,
BLOCK_DMODEL,
BLOCK_N,
offs_m,
offs_n,
# _, MASK_STEPS, ...
PRE_LOAD_V,
True,
ENABLE_DROPOUT,
RETURN_ENCODED_SOFTMAX,
PADDED_HEAD,
)
# epilogue
acc = acc / l_i[:, None]
if ENABLE_DROPOUT:
acc = acc / (1 - dropout_p)
# If seqlen_q > seqlen_k but the delta is not a multiple of BLOCK_M,
# then we have one block with a row of all NaNs which come from computing
# softmax over a row of all -infs (-inf - inf = NaN). We check for that here
# and store 0s where there are NaNs as these rows should've been zeroed out.
end_m_idx = (start_m + 1) * BLOCK_M
start_m_idx = start_m * BLOCK_M
causal_start_idx = seqlen_q - seqlen_k
acc = acc.to(Out.type.element_ty)
if IS_CAUSAL: # noqa: SIM102
if causal_start_idx > start_m_idx and causal_start_idx < end_m_idx:
out_mask_boundary = tl.full(
(BLOCK_DMODEL,), causal_start_idx, dtype=tl.int32
)
mask_m_offsets = start_m_idx + tl.arange(0, BLOCK_M)
out_ptrs_mask = mask_m_offsets[:, None] >= out_mask_boundary[None, :]
z = 0.0
acc = tl.where(out_ptrs_mask, acc, z.to(acc.type.element_ty))
# write back LSE
# l_ptrs = L + off_z * hq * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q + offs_m
# If seqlen_q not multiple of BLOCK_M, we need to mask out the last
# few rows. This is only true for the last M block. For others,
# overflow_size will be -ve
# overflow_size = end_m_idx - seqlen_q
# if overflow_size > 0:
# boundary = tl.full((BLOCK_M,), BLOCK_M - overflow_size, dtype=tl.int32)
# # This is a > check because mask being 0 blocks the store.
# l_ptrs_mask = boundary > tl.arange(0, BLOCK_M)
# tl.store(l_ptrs, m_i + tl.math.log2(l_i), mask=l_ptrs_mask)
# else:
# tl.store(l_ptrs, m_i + tl.math.log2(l_i))
# write back O
o_offset = off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh
O_block_ptr = tl.make_block_ptr(
base=Out + o_offset,
shape=(seqlen_q, ACTUAL_BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
# Need boundary check on this to make sure the padding from the
# Q and KV tensors in both dims are not part of what we store back.
# TODO: Do the boundary check optionally.
tl.store(O_block_ptr, acc, boundary_check=(0, 1))
def check_args(
q,
k,
v,
o,
varlen=True,
max_seqlens=None,
cu_seqlens_q=None,
cu_seqlens_k=None,
):
assert q.dim() == k.dim() and q.dim() == v.dim()
if varlen:
assert q.dim() == 3
total_q, nheads_q, head_size = q.shape
total_k, nheads_k, _ = k.shape
assert cu_seqlens_q is not None
assert cu_seqlens_k is not None
assert len(cu_seqlens_q) == len(cu_seqlens_k)
else:
assert q.dim() == 4
batch, nheads_q, seqlen_q, head_size = q.shape
_, nheads_k, seqlen_k, _ = k.shape
assert max_seqlens > 0
assert k.shape == v.shape
assert q.shape[-1] == k.shape[-1] and q.shape[-1] == v.shape[-1]
# TODO: Change assert if we support qkl f8 and v f16
assert q.dtype == k.dtype and q.dtype == v.dtype
# TODO: Fix assert to check head size <=256 once supported
assert head_size <= 128
assert o.shape == q.shape
assert (nheads_q % nheads_k) == 0
class _attention(torch.autograd.Function):
@staticmethod
def forward(
ctx,
q,
k,
v,
o,
cu_seqlens_q,
cu_seqlens_k,
max_seqlens_q,
max_seqlens_k,
causal=False,
sm_scale=1.0,
bias=None,
):
if o is None:
o = torch.empty_like(q, dtype=v.dtype)
check_args(
q,
k,
v,
o,
varlen=True,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
)
if True: # varlen
total_q, nheads_q, head_size = q.shape
total_k, nheads_k, _ = k.shape
batch = len(cu_seqlens_q) - 1
q_strides = (0, q.stride(1), q.stride(0), q.stride(2))
k_strides = (0, k.stride(1), k.stride(0), k.stride(2))
v_strides = (0, v.stride(1), v.stride(0), v.stride(2))
o_strides = (0, o.stride(1), o.stride(0), o.stride(2))
else:
batch, seqlen_q, nheads_q, head_size = q.shape
_, seqlen_k, nheads_k, _ = k.shape
q_strides = (q.stride(0), q.stride(2), q.stride(1), q.stride(3))
k_strides = (k.stride(0), k.stride(2), k.stride(1), k.stride(3))
v_strides = (v.stride(0), v.stride(2), v.stride(1), v.stride(3))
o_strides = (o.stride(0), o.stride(2), o.stride(1), o.stride(3))
        # Round the head size up to the closest power of 2, with a minimum of 16.
padded_d_model = 1 << (head_size - 1).bit_length()
padded_d_model = max(padded_d_model, 16)
def grid(META):
return triton.cdiv(max_seqlens_q, META["BLOCK_M"]), nheads_q, batch
encoded_softmax = None
# Seed the RNG so we get reproducible results for testing.
philox_seed = 0x1BF52
philox_offset = 0x1D4B42
if bias is not None:
bias_strides = (
bias.stride(0),
bias.stride(1),
bias.stride(2),
bias.stride(3),
)
else:
bias_strides = (0, 0, 0, 0)
attn_fwd[grid](
q,
k,
v,
bias,
sm_scale,
None,
o,
*q_strides,
*k_strides,
*v_strides,
*o_strides,
*bias_strides,
cu_seqlens_q,
cu_seqlens_k,
dropout_p=0.0,
philox_seed=philox_seed,
philox_offset_base=philox_offset,
encoded_softmax=encoded_softmax,
HQ=nheads_q,
HK=nheads_k,
ACTUAL_BLOCK_DMODEL=head_size,
MAX_SEQLENS_Q=max_seqlens_q,
MAX_SEQLENS_K=max_seqlens_k,
IS_CAUSAL=causal,
VARLEN=True,
BLOCK_DMODEL=padded_d_model,
BIAS_TYPE=0 if bias is None else 1,
ENABLE_DROPOUT=False,
RETURN_ENCODED_SOFTMAX=False,
)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = head_size
ctx.causal = causal
ctx.dropout_p = 0.0
ctx.philox_seed = philox_seed
ctx.philox_offset = philox_offset
ctx.encoded_softmax = encoded_softmax
ctx.return_encoded_softmax = False
return o, encoded_softmax
triton_attention = _attention.apply
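# Illustrative usage sketch (not part of the upstream kernel): the tensor
# shapes follow the varlen layout enforced by `check_args` above, but the
# sizes are made up and a CUDA device with Triton installed is assumed.
#
#   import torch
#
#   total_tokens, nheads, head_size = 32, 8, 64
#   q = torch.randn(total_tokens, nheads, head_size, dtype=torch.float16, device="cuda")
#   k = torch.randn_like(q)
#   v = torch.randn_like(q)
#   o = torch.empty_like(q)
#   # Two sequences of 16 tokens each, as cumulative sequence lengths.
#   cu_seqlens = torch.tensor([0, 16, 32], dtype=torch.int32, device="cuda")
#   out, _ = triton_attention(
#       q, k, v, o, cu_seqlens, cu_seqlens, 16, 16, True, 1.0 / head_size**0.5
#   )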
| text-generation-inference/server/text_generation_server/layers/attention/flash_attn_triton.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/flash_attn_triton.py",
"repo_id": "text-generation-inference",
"token_count": 14692
} |
from accelerate import init_empty_weights
import torch
@classmethod
def load_conv2d(cls, prefix, weights, in_channels, out_channels, kernel_size, stride):
weight = weights.get_tensor(f"{prefix}.weight")
bias = weights.get_tensor(f"{prefix}.bias")
with init_empty_weights():
conv2d = cls(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
)
conv2d.weight = torch.nn.Parameter(weight)
conv2d.bias = torch.nn.Parameter(bias)
return conv2d
@classmethod
def load_conv2d_no_bias(
cls, prefix, weights, in_channels, out_channels, kernel_size, stride
):
weight = weights.get_tensor(f"{prefix}.weight")
with init_empty_weights():
conv2d = cls(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
)
conv2d.weight = torch.nn.Parameter(weight)
conv2d.bias = None
return conv2d
torch.nn.Conv2d.load = load_conv2d
torch.nn.Conv2d.load_no_bias = load_conv2d_no_bias
| text-generation-inference/server/text_generation_server/layers/conv.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/conv.py",
"repo_id": "text-generation-inference",
"token_count": 518
} |
from typing import Optional
import torch
import torch.nn as nn
from text_generation_server.layers.fp8 import fp8_quantize
from text_generation_server.layers.marlin.gptq import _check_valid_shape
from text_generation_server.layers.marlin.util import (
_check_marlin_kernels,
permute_scales,
)
try:
import marlin_kernels
except ImportError:
marlin_kernels = None
MARLIN_TILE_SIZE = 16
class GPTQMarlinFP8Linear(nn.Module):
"""
FP8 GPTQ-Marlin linear layer.
"""
def __init__(
self,
qweight: torch.Tensor,
scales: torch.Tensor,
bias: Optional[torch.Tensor],
) -> None:
super().__init__()
_check_marlin_kernels()
assert marlin_kernels is not None
scales = scales.unsqueeze(0)
if scales.shape[1] == 1:
out_features, in_features = qweight.shape
scales = scales.repeat(1, out_features)
qweight, scales = repack_fp8_for_marlin(qweight, scales)
in_features = qweight.shape[0] * MARLIN_TILE_SIZE
out_features = scales.shape[1]
_check_valid_shape(in_features=in_features, out_features=out_features)
self.qweight = qweight
self.scales = scales
self.bias = bias if bias is not None else None
self.workspace = torch.zeros(
out_features // 64 * 16, dtype=torch.int, device=qweight.device
)
@classmethod
def from_unquant(cls, weight, bias, dtype):
qweight, scales = fp8_quantize(weight)
return cls(qweight=qweight, scales=scales.to(dtype), bias=bias)
@classmethod
def from_fp8(
cls,
weight: torch.Tensor,
scale: torch.Tensor,
bias: torch.Tensor,
dtype: torch.dtype,
**kwargs,
):
return cls(qweight=weight, scales=scale.to(dtype), bias=bias)
def forward(self, A: torch.Tensor) -> torch.Tensor:
assert marlin_kernels is not None
A_flat = A.view(-1, A.shape[-1])
C = marlin_kernels.fp8_marlin_gemm(
A_flat,
self.qweight,
self.scales,
self.workspace,
8,
A_flat.shape[0],
self.scales.shape[1],
A_flat.shape[1],
)
C = C.reshape(A.shape[:-1] + (self.scales.shape[1],))
if self.bias is not None:
C += self.bias
return C
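# Rough construction sketch (not from the upstream code base): `from_unquant`
# quantizes a regular fp16 weight to FP8 and repacks it for the Marlin kernel.
# A CUDA device with `marlin_kernels` installed is assumed, and the sizes are
# arbitrary but must satisfy `_check_valid_shape`.
#
#   weight = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
#   layer = GPTQMarlinFP8Linear.from_unquant(weight, bias=None, dtype=torch.float16)
#   y = layer(torch.randn(2, 4096, dtype=torch.float16, device="cuda"))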
def pack_fp8_as_int32(fp8_tensor: torch.Tensor) -> torch.Tensor:
"""
Repack FP8 weights to gptq format (packed int32 elements).
"""
assert fp8_tensor.dtype == torch.float8_e4m3fn
if fp8_tensor.shape[0] % 4 != 0:
raise ValueError(
f"Leading tensor dimension is not divisable by 4: {fp8_tensor.shape[0]}"
)
# Reshape to prepare for packing
reshaped = fp8_tensor.reshape(-1, 4, *fp8_tensor.shape[1:])
# Convert fp8 to uint8 (byte) representation
byte_tensor = reshaped.view(torch.uint8)
# Pack 4 uint8 values into one int32
packed = torch.zeros(
fp8_tensor.shape[0] // 4,
fp8_tensor.shape[1],
dtype=torch.int32,
device=fp8_tensor.device,
)
for i in range(4):
packed.bitwise_or_(byte_tensor[:, i].to(torch.int32) << i * 8)
return packed
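# Rough illustration (assumes a PyTorch build with float8 support): an (8, 16)
# float8_e4m3fn tensor packs into a (2, 16) int32 tensor, each int32 holding
# the raw bytes of 4 consecutive fp8 values along the leading dimension.
#
#   w = torch.randn(8, 16, device="cuda").to(torch.float8_e4m3fn)
#   packed = pack_fp8_as_int32(w)
#   assert packed.shape == (2, 16) and packed.dtype == torch.int32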
def repack_fp8_for_marlin(weight: torch.Tensor, scales: torch.Tensor):
"""
Repack FP8 tensor for GPTQ-Marlin.
"""
out_features, in_features = weight.shape
    # Torch linear layer weights have shape [out_features, in_features],
    # while GPTQ-quantized weights use [in_features / pack_factor, out_features],
    # so transpose before packing.
qweight = pack_fp8_as_int32(weight.t())
perm = torch.empty(0, dtype=torch.int, device=qweight.device)
repacked = marlin_kernels.gptq_marlin_repack(
qweight, perm, in_features, out_features, 8
)
scales = permute_scales(scales)
return repacked, scales
| text-generation-inference/server/text_generation_server/layers/marlin/fp8.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/marlin/fp8.py",
"repo_id": "text-generation-inference",
"token_count": 1806
} |
import torch
import time
import torch.distributed
from dataclasses import dataclass
from opentelemetry import trace
from transformers import (
AutoConfig,
AutoTokenizer,
AutoModelForCausalLM,
PreTrainedTokenizerBase,
)
from typing import Optional, Tuple, List, Type, Dict
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
from text_generation_server.models import Model
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.models.types import (
Batch,
Tokens,
Generation,
GeneratedText,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling
tracer = trace.get_tracer(__name__)
@dataclass
class CausalLMBatch(Batch):
batch_id: int
requests: List[generate_pb2.Request]
requests_idx_mapping: Dict[int, int]
# Decoder values
input_ids: torch.Tensor
attention_mask: torch.Tensor
position_ids: torch.Tensor
past_key_values: Optional[List[Tuple]]
# All tokens
all_input_ids: List[torch.Tensor]
# Lengths of all generations present in the batch
input_lengths: List[int]
prefix_offsets: List[int]
read_offsets: List[int]
# Generation helpers
next_token_choosers: List[NextTokenChooser]
stopping_criterias: List[StoppingCriteria]
top_n_tokens: List[int]
top_n_tokens_tensor: torch.Tensor
# Metadata used for padding
max_input_length: int
padding_right_offset: int
# Maximum number of tokens this batch will grow to
max_tokens: int
# Past metadata
keys_head_dim_last: bool = True
def to_pb(self) -> generate_pb2.CachedBatch:
return generate_pb2.CachedBatch(
id=self.batch_id,
request_ids=[r.id for r in self.requests],
size=len(self),
max_tokens=self.max_tokens,
current_tokens=len(self.input_ids),
)
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "CausalLMBatch":
inputs = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
prefix_offsets = []
read_offsets = []
requests_idx_mapping = {}
# Parse batch
max_truncation = 0
padding_right_offset = 0
max_decode_tokens = 0
for i, r in enumerate(pb.requests):
requests_idx_mapping[r.id] = i
inputs.append(concat_text_chunks(r.input_chunks.chunks))
next_token_choosers.append(
NextTokenChooser.from_pb(r.parameters, device, tokenizer)
)
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
max_truncation = max(max_truncation, r.truncate)
max_decode_tokens += stopping_criteria.max_new_tokens
padding_right_offset = max(
padding_right_offset, stopping_criteria.max_new_tokens
)
tokenized_inputs = tokenizer(
inputs,
return_tensors="pt",
padding=True,
return_token_type_ids=False,
truncation=True,
max_length=max_truncation,
).to(device)
for _ in pb.requests:
input_len = tokenized_inputs["input_ids"].shape[1]
prefix_offsets.append(input_len - 5)
read_offsets.append(input_len)
input_lengths = tokenized_inputs["attention_mask"].sum(1)
max_input_length = input_lengths.max()
input_ids = tokenized_inputs["input_ids"]
# Allocate maximum attention_mask
attention_mask = input_ids.new_zeros(
(pb.size, max_input_length + padding_right_offset)
)
# Copy tokenizer attention_mask into fully allocated attention_mask
attention_mask[:, :max_input_length] = tokenized_inputs["attention_mask"]
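        # Positions are derived from the attention mask so that left padding does not
        # shift real token positions; padded slots get a dummy position of 1.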
position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1
position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1)
all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
max_tokens = len(inputs) * (max_input_length + max_decode_tokens)
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=None,
all_input_ids=list(all_input_ids),
input_lengths=input_lengths.tolist(),
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length.item(),
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]) -> Optional["CausalLMBatch"]:
if len(request_ids) == 0:
raise ValueError("Batch must have at least one request")
if len(request_ids) == len(self):
return self
keep_indices = []
# New values after filtering
requests_idx_mapping = {}
requests = []
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
max_input_length = 0
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
total_remaining_decode_tokens = 0
new_padding_right_offset = 0
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
requests_idx_mapping[request_id] = i
keep_indices.append(idx)
requests.append(self.requests[idx])
prefix_offsets.append(self.prefix_offsets[idx])
read_offsets.append(self.read_offsets[idx])
all_input_ids.append(self.all_input_ids[idx])
request_input_length = self.input_lengths[idx]
input_lengths.append(request_input_length)
max_input_length = max(max_input_length, request_input_length)
next_token_choosers.append(self.next_token_choosers[idx])
stopping_criteria = self.stopping_criterias[idx]
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(self.top_n_tokens[idx])
remaining_decode_tokens = (
stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
)
total_remaining_decode_tokens += remaining_decode_tokens
new_padding_right_offset = max(
new_padding_right_offset, remaining_decode_tokens
)
# Apply indices to input_ids, attention mask, past key values and other items that need to be cached
input_ids = self.input_ids[keep_indices]
position_ids = self.position_ids[keep_indices]
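        # Keep only the columns still needed: the last `max_input_length` token
        # columns plus room for the remaining decode steps of the kept requests.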
self.attention_mask = self.attention_mask[
keep_indices,
-(self.padding_right_offset + max_input_length) : (
self.attention_mask.shape[1] - self.padding_right_offset
)
+ new_padding_right_offset,
]
# Ensure that past_key_values tensors can be updated in-place
if type(self.past_key_values[0]) is tuple:
self.past_key_values = [list(layer) for layer in self.past_key_values]
# Update tensors in-place to allow incremental garbage collection
past_kv_length = max_input_length - 1
for layer in self.past_key_values:
past_keys, past_values = layer
if len(past_keys.shape) == 3:
                # Force past to be of dim [batch_size, num_heads, ...] for easy indexing
past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:])
past_values = past_values.view(len(self), -1, *past_values.shape[-2:])
if self.keys_head_dim_last:
layer[0] = past_keys[keep_indices, :, -past_kv_length:, :]
else:
layer[0] = past_keys[keep_indices, :, :, -past_kv_length:]
del past_keys
layer[1] = past_values[keep_indices, :, -past_kv_length:, :]
del past_values
top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices]
max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens
self.requests = requests
self.requests_idx_mapping = requests_idx_mapping
self.input_ids = input_ids
self.position_ids = position_ids
self.all_input_ids = all_input_ids
self.input_lengths = input_lengths
self.prefix_offsets = prefix_offsets
self.read_offsets = read_offsets
self.next_token_choosers = next_token_choosers
self.stopping_criterias = stopping_criterias
self.top_n_tokens = top_n_tokens
self.top_n_tokens_tensor = top_n_tokens_tensor
self.max_input_length = max_input_length
self.padding_right_offset = new_padding_right_offset
self.max_tokens = max_tokens
return self
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches: List["CausalLMBatch"]) -> "CausalLMBatch":
# Used for padding
total_batch_size = 0
max_input_length = 0
padding_right_offset = 0
for batch in batches:
total_batch_size += len(batch)
max_input_length = max(max_input_length, batch.max_input_length)
padding_right_offset = max(padding_right_offset, batch.padding_right_offset)
# Batch attributes
requests = []
requests_idx_mapping = {}
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_tokens = 0
# Batch tensors
input_ids = None
attention_mask = None
position_ids = None
past_key_values = []
top_n_tokens_tensor = None
# Used for slicing correctly inside the tensors
# Equivalent to a cumsum on batch sizes
start_index = 0
for i, batch in enumerate(batches):
requests.extend(batch.requests)
input_lengths.extend(batch.input_lengths)
prefix_offsets.extend(batch.prefix_offsets)
read_offsets.extend(batch.read_offsets)
all_input_ids.extend(batch.all_input_ids)
next_token_choosers.extend(batch.next_token_choosers)
stopping_criterias.extend(batch.stopping_criterias)
top_n_tokens.extend(batch.top_n_tokens)
if i == 0:
requests_idx_mapping = batch.requests_idx_mapping
else:
# We need to offset the mapping for each batch by the cumulative batch size
for k, v in batch.requests_idx_mapping.items():
requests_idx_mapping[k] = v + start_index
# Slicing end index for this batch
end_index = start_index + len(batch)
# We only concatenate batches that did at least one step
if batch.past_key_values is None:
raise ValueError("only concatenate prefilled batches")
# Create empty tensor
# input_ids is always of shape [batch_size, 1]
# We do not need to pad it
if input_ids is None:
input_ids = batch.input_ids.new_empty((total_batch_size, 1))
# Copy to correct indices
input_ids[start_index:end_index] = batch.input_ids
# Create padded tensor
if attention_mask is None:
attention_mask = batch.attention_mask.new_zeros(
(total_batch_size, max_input_length + padding_right_offset),
)
if top_n_tokens_tensor is None:
top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
total_batch_size,
)
top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
# We need to slice the attention mask to remove padding from previous steps
# and to remove unused allocated space
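            # left_offset right-aligns this batch inside the new, wider mask, while
            # batch_left_offset skips the leading columns its old mask never used.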
left_offset = max_input_length - batch.max_input_length
batch_left_offset = (
batch.attention_mask.shape[1]
- batch.max_input_length
- batch.padding_right_offset
)
attention_mask[
start_index:end_index,
left_offset:-padding_right_offset,
] = batch.attention_mask[
:,
batch_left_offset : -batch.padding_right_offset,
]
# Create empty tensor
# position_ids is always of shape [batch_size, 1]
if position_ids is None:
position_ids = batch.position_ids.new_empty((total_batch_size, 1))
position_ids[start_index:end_index] = batch.position_ids
# Shenanigans to get dimensions because BLOOM outputs a past with a different shape
# BLOOM Keys: [batch_size * num_heads, head_dim, seq_length]
# BLOOM Values: [batch_size * num_heads, seq_length, head_dim]
# And ensure that we can update tensors in-place
if isinstance(batch.past_key_values[0], tuple):
batch.past_key_values = [
[t.view(len(batch), -1, *t.shape[-2:]) for t in layer]
for layer in batch.past_key_values
]
elif len(batch.past_key_values[0][0].shape) == 3:
for layer in batch.past_key_values:
for k, t in enumerate(layer):
layer[k] = t.view(len(batch), -1, *t.shape[-2:])
# Add eventual padding tokens that were added while concatenating
max_tokens += batch.max_tokens + (
max_input_length - batch.max_input_length
) * len(batch)
start_index = end_index
first_past_kvs = batches[0].past_key_values
_, num_heads, padded_sequence_length, head_dim = first_past_kvs[0][1].shape
padded_past_values_shape = (
total_batch_size,
num_heads,
max_input_length - 1,
head_dim,
)
if batches[0].keys_head_dim_last:
padded_past_keys_shape = padded_past_values_shape
else:
# seq_length is last for BLOOM
padded_past_keys_shape = (
total_batch_size,
num_heads,
head_dim,
max_input_length - 1,
)
# Iterate over attention layers
# Concatenate past key values layer by layer to allow incremental garbage collection
for j in range(len(first_past_kvs)):
padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape)
start_index = 0
for batch in batches:
past_keys = batch.past_key_values[j][0]
# Clear reference to the original tensor
batch.past_key_values[j][0] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the keys to remove the padding from previous batches
past_seq_len = batch.max_input_length - 1
if batch.keys_head_dim_last:
padded_past_keys[start_index:end_index, :, -past_seq_len:, :] = (
past_keys[:, :, -past_seq_len:, :]
)
else:
# BLOOM case
padded_past_keys[start_index:end_index, :, :, -past_seq_len:] = (
past_keys[:, :, :, -past_seq_len:]
)
del past_keys
start_index = end_index
padded_past_values = first_past_kvs[j][1].new_zeros(
padded_past_values_shape
)
start_index = 0
for batch in batches:
past_values = batch.past_key_values[j][1]
# Clear reference to the original tensor
batch.past_key_values[j][1] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the past values to remove the padding from previous batches
past_seq_len = batch.max_input_length - 1
padded_past_values[start_index:end_index, :, -past_seq_len:, :] = (
past_values[:, :, -past_seq_len:, :]
)
del past_values
# Update values
start_index = end_index
past_key_values.append([padded_past_keys, padded_past_values])
return cls(
batch_id=batches[0].batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
all_input_ids=all_input_ids,
input_lengths=input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length,
padding_right_offset=padding_right_offset,
keys_head_dim_last=batches[0].keys_head_dim_last,
max_tokens=max_tokens,
)
def __len__(self):
return len(self.requests)
@dataclass
class CausalLMBatchKeysLast(CausalLMBatch):
keys_head_dim_last: bool = False
class CausalLM(Model):
def __init__(
self,
model_id: str,
model_class,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
default_dtype=torch.float16,
trust_remote_code: bool = False,
tokenizer_class=AutoTokenizer,
config_class=AutoConfig,
batch_class=CausalLMBatch,
):
self.quantize = quantize
self.batch_class = batch_class
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = default_dtype if dtype is None else dtype
elif hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device(f"xpu:{rank}")
dtype = default_dtype if dtype is None else dtype
elif SYSTEM == "ipex":
device = torch.device("cpu")
# Float16 doesn't exist on target.
dtype = torch.bfloat16 if dtype is None else dtype
else:
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
tokenizer = tokenizer_class.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = config_class.from_pretrained(
model_id,
revision=revision,
trust_remote_code=trust_remote_code,
)
config.quantize = quantize
config.speculator = speculator
if tokenizer.pad_token_id is None:
if config.pad_token_id is not None:
tokenizer.pad_token_id = config.pad_token_id
elif config.eos_token_id is not None:
tokenizer.pad_token_id = config.eos_token_id
elif tokenizer.eos_token_id is not None:
tokenizer.pad_token_id = tokenizer.eos_token_id
torch.distributed.barrier(group=self.process_group)
weights_loader = get_loader(
quantize=quantize, model_id=model_id, revision=revision
)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
filenames,
device=device,
dtype=dtype,
process_group=self.process_group,
weights_loader=weights_loader,
)
prefix = ""
model = model_class(prefix, config, weights)
torch.distributed.barrier(group=self.process_group)
super().__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
)
@classmethod
def fallback(
cls,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
if speculator:
raise RuntimeError("Speculator decoding is not enabled for AutoModel")
device_count = 0
if torch.cuda.is_available():
device = torch.device("cuda")
device_count = torch.cuda.device_count()
dtype = torch.float16 if dtype is None else dtype
elif hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device("xpu")
device_count = torch.xpu.device_count()
dtype = torch.float16 if dtype is None else dtype
else:
if quantize:
raise ValueError("quantization is not available on CPU")
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
tokenizer = AutoTokenizer.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
model = AutoModelForCausalLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=dtype,
device_map=("auto" if device_count > 1 else None),
load_in_8bit=quantize == "bitsandbytes",
trust_remote_code=trust_remote_code,
)
if device_count == 1 and quantize != "bitsandbytes":
model = model.to(device)
if tokenizer.pad_token_id is None:
if model.config.pad_token_id is not None:
tokenizer.pad_token_id = model.config.pad_token_id
elif model.config.eos_token_id is not None and isinstance(
model.config.eos_token_id, int
):
tokenizer.pad_token_id = model.config.eos_token_id
elif tokenizer.eos_token_id is not None:
tokenizer.pad_token_id = tokenizer.eos_token_id
else:
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
self = cls.__new__(
cls,
)
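        # Bypass CausalLM.__init__ (which expects sharded safetensors weights) and
        # initialize the base Model directly around the transformers AutoModel.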
self.batch_class = CausalLMBatch
super().__init__(
self,
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
)
self.quantize = quantize
return self
@property
def batch_type(self) -> Type[CausalLMBatch]:
return self.batch_class
def forward(
        self, input_ids, attention_mask, position_ids, past_key_values: Optional[List[Tuple]] = None
) -> Tuple[
torch.Tensor, Optional[torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]
]:
# Model Forward
kwargs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"past_key_values": past_key_values,
"use_cache": True,
"return_dict": True,
}
if self.has_position_ids:
kwargs["position_ids"] = position_ids
outputs = self.model.forward(**kwargs)
if isinstance(outputs, tuple):
outputs, speculative_logits = outputs
else:
speculative_logits = None
return outputs.logits, speculative_logits, outputs.past_key_values
@tracer.start_as_current_span("generate_token")
def generate_token(
self, batch: CausalLMBatch
) -> Tuple[List[Generation], Optional[CausalLMBatch], Tuple[int, int]]:
start = time.time_ns()
# slice the attention mask to the correct shape
attention_mask = batch.attention_mask[:, : -batch.padding_right_offset]
logits, speculative_logits, past = self.forward(
batch.input_ids,
attention_mask,
batch.position_ids,
batch.past_key_values,
)
# Results
generations: List[Generation] = []
stopped = True
# Speculation is not active for causal
accepted_ids = torch.ones_like(batch.input_ids)[:, 0]
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
batch.top_n_tokens,
batch.top_n_tokens_tensor,
torch.log_softmax(logits[:, -1], -1),
accepted_ids,
)
start_decode = time.time_ns()
# Zipped iterator
iterator = zip(
batch.requests,
batch.input_lengths,
batch.prefix_offsets,
batch.read_offsets,
logits,
batch.next_token_choosers,
batch.stopping_criterias,
batch.all_input_ids,
batch.top_n_tokens,
batch_top_token_ids,
batch_top_token_logprobs,
)
# For each member of the batch
for i, (
request,
input_length,
prefix_offset,
read_offset,
logits,
next_token_chooser,
stopping_criteria,
all_input_ids,
top_n_tokens,
top_token_ids,
top_token_logprobs,
) in enumerate(iterator):
# Select next token
next_token_id, logprobs = next_token_chooser(
all_input_ids.view(1, -1), logits[-1:, :]
)
# Append next token to all tokens
all_input_ids = torch.cat([all_input_ids, next_token_id])
new_input_length = input_length + 1
# Generated token
next_token_logprob = logprobs[-1, next_token_id]
next_token_id_squeezed = next_token_id.squeeze()
next_token_text, prefix_offset, read_offset = self.decode_token(
all_input_ids[:, 0], prefix_offset, read_offset
)
# Evaluate stopping criteria
stop, reason = stopping_criteria(
next_token_id_squeezed,
next_token_text,
)
if not stop:
stopped = False
# Shard generations
# All generations will be appended in the rust sharded client
if i % self.world_size == self.rank:
if stop:
# Decode generated tokens
output_text, _, _ = self.decode_token(
all_input_ids[:, 0],
prefix_offset=len(all_input_ids)
- stopping_criteria.current_tokens
- 1,
read_offset=len(all_input_ids)
- stopping_criteria.current_tokens,
skip_special_tokens=True,
)
# Get seed
if isinstance(next_token_chooser.choice, Sampling):
seed = next_token_chooser.choice.seed
else:
seed = None
generated_text = GeneratedText(
output_text, stopping_criteria.current_tokens, reason, seed
)
else:
generated_text = None
# Prefill
if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
# Remove generated token to only have prefill and add nan for first prompt token
prefill_logprobs = [float("nan")] + torch.log_softmax(
logits, -1
).gather(1, all_input_ids[1:]).squeeze(1)[
-new_input_length:-1
].tolist()
prefill_token_ids = all_input_ids[-new_input_length:-1]
prefill_texts = self.tokenizer.batch_decode(
prefill_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
prefill_tokens = Tokens(
prefill_token_ids,
prefill_logprobs,
prefill_texts,
is_special=[],
)
else:
prefill_tokens = None
if top_n_tokens > 0:
all_top_tokens = []
for top_token_ids, top_token_logprobs in zip(
top_token_ids, top_token_logprobs
):
toptoken_texts = self.tokenizer.batch_decode(
top_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
special_toptokens = [
token_id in self.all_special_ids
for token_id in top_token_ids
]
top_tokens = Tokens(
top_token_ids,
top_token_logprobs,
toptoken_texts,
special_toptokens,
)
all_top_tokens.append(top_tokens)
top_tokens = all_top_tokens
else:
top_tokens = None
generation = Generation(
request.id,
prefill_tokens,
Tokens(
[next_token_id_squeezed],
[next_token_logprob],
[next_token_text],
[next_token_id_squeezed.item() in self.all_special_ids],
),
generated_text,
top_tokens,
)
generations.append(generation)
# Update values
batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(
next_token_id_squeezed.item()
)
batch.input_ids[i, 0] = next_token_id
batch.all_input_ids[i] = all_input_ids
batch.input_lengths[i] = new_input_length
batch.prefix_offsets[i] = prefix_offset
batch.read_offsets[i] = read_offset
batch.max_input_length = max(batch.max_input_length, new_input_length)
# We finished all generations in the batch; there is no next batch
if stopped:
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, None, (forward_ns, decode_ns)
# Slice unused values from prefill
batch.input_ids = batch.input_ids[:, :1]
# Update attention_mask as we added a new token to input_ids
batch.attention_mask[:, -batch.padding_right_offset] = 1
# Decrease right offset
batch.padding_right_offset -= 1
# Update position_ids
batch.position_ids = batch.position_ids[:, -1:] + 1
# Update past key values
batch.past_key_values = past
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, batch, (forward_ns, decode_ns)
| text-generation-inference/server/text_generation_server/models/causal_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 16985
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from typing import Optional, List, Tuple
from text_generation_server.layers.tensor_parallel import TensorParallelColumnLinear
from text_generation_server.layers.attention import Seqlen
from text_generation_server.models.custom_modeling.vlm import (
load_text_model,
load_vision_model,
)
class PaliGemmaForConditionalGeneration(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
config.vision_config.quantize = config.quantize
self.vision_tower = load_vision_model(
prefix="vision_tower" if not prefix else f"{prefix}.vision_tower",
config=config.vision_config,
weights=weights,
)
self.post_vision_tower_layernorm = nn.LayerNorm.load(
prefix="vision_tower.vision_model.post_layernorm",
weights=weights,
eps=config.vision_config.layer_norm_eps,
)
self.multi_modal_projector = TensorParallelColumnLinear.load(
config,
prefix="multi_modal_projector.linear",
weights=weights,
bias=True,
)
self.vocab_size = config.text_config.vocab_size
self.config = config
text_config = config.text_config
text_config.speculator = config.speculator
text_config.quantize = config.quantize
self.text_model = load_text_model(
prefix="language_model" if not prefix else f"{prefix}.language_model",
config=config.text_config,
weights=weights,
)
self.pad_token_id = (
config.pad_token_id if config.pad_token_id is not None else -1
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor] = None,
lm_head_indices: Optional[torch.Tensor] = None,
pixel_values: torch.FloatTensor = None,
# Unused here
pixel_attention_mask: Optional[torch.BoolTensor] = None,
image_sizes: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
inputs_embeds = self.text_model.embed_tokens(input_ids)
# TODO This is odd but apparently pali gemma position ids start at 1.
if cu_seqlen_prefill is not None:
max_s += 1
position_ids += 1
if pixel_values is not None:
pixel_values = pixel_values.to(dtype=inputs_embeds.dtype)
image_outputs = self.vision_tower(pixel_values)
last_hidden_state = self.post_vision_tower_layernorm(
image_outputs.last_hidden_state
)
image_features = self.multi_modal_projector(last_hidden_state)
# mask where image or padding tokens
mask = input_ids == self.config.image_token_index
# insert image features into input embeddings
inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1])
hidden_states = self.text_model.model(
inputs_embeds=inputs_embeds,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.text_model.lm_head(hidden_states)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 1969
} |
import torch
import torch.distributed
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
from torch import nn
from typing import Optional, Tuple, Any
from transformers.configuration_utils import PretrainedConfig
import torch.nn.functional as F
from text_generation_server.layers import (
SpeculativeHead,
TensorParallelEmbedding,
FastLinear,
)
from text_generation_server.layers.layernorm import FastRMSNorm
from einops import rearrange
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
import math
from dataclasses import dataclass
@dataclass
class InferenceParams:
"""Inference parameters that are passed to the main model in order
to efficienly calculate and store the context during inference."""
max_seqlen: int
max_batch_size: int
conv_states: torch.Tensor
ssm_states: torch.Tensor
seqlen_offset: int
class MambaConfig(PretrainedConfig):
def __init__(
self,
vocab_size=50280,
d_model=768,
d_state=16,
n_layer=32,
layer_norm_epsilon=1e-5,
tie_word_embeddings=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
expand=2,
dt_rank="auto",
**kwargs,
):
self.vocab_size = vocab_size
self.n_layer = n_layer
self.layer_norm_epsilon = layer_norm_epsilon
self.d_model = d_model
self.d_inner = d_model * 2
self.d_conv = 4
self.d_state = d_state
self.expand = expand
self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
class MambaBlock(nn.Module):
def __init__(self, prefix, config, weights, layer_id):
super().__init__()
self.layer_id = layer_id
self.in_proj = FastLinear.load(config, f"{prefix}.in_proj", weights, bias=False)
self.x_proj = FastLinear.load(config, f"{prefix}.x_proj", weights, bias=False)
self.dt_proj = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=True)
self.dt_proj_no_bias = FastLinear.load(
config, f"{prefix}.dt_proj", weights, bias=False
)
self.out_proj = FastLinear.load(
config, f"{prefix}.out_proj", weights, bias=False
)
self.conv1d = FastLinear.load(config, f"{prefix}.conv1d", weights, bias=True)
self.negA = -torch.exp(weights.get_tensor(f"{prefix}.A_log").float())
self.D = weights.get_tensor(f"{prefix}.D")
self.activation = "silu"
self.dt_rank = config.dt_rank
self.d_state = config.d_state
self.d_conv = config.d_conv
self.act = nn.SiLU()
# inference_params
def forward(self, hidden_states: torch.Tensor, inference_params=None):
if inference_params.seqlen_offset > 0:
conv_state = inference_params.conv_states[self.layer_id]
ssm_state = inference_params.ssm_states[self.layer_id]
out, conv_state, ssm_state = self.step(hidden_states, conv_state, ssm_state)
return out, conv_state, ssm_state
_, seqlen, _ = hidden_states.shape
projected_states = self.in_proj(hidden_states).transpose(1, 2)
# assert projected_states.shape == [batch_size, 2 * dstate, seqlen], f"{projected_states.shape} [{batch_size}, {dstate}, {seqlen}]"
x, z = projected_states.chunk(2, dim=1)
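        # Keep the last d_conv input columns (left-padded with zeros for short
        # prompts) so later decode steps can run the convolution incrementally.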
conv_state = F.pad(x, (self.d_conv - seqlen, 0))
x = causal_conv1d_fn(
x=x,
weight=self.conv1d.weight.squeeze(1),
bias=self.conv1d.bias,
activation=self.activation,
)
# We're careful here about the layout, to avoid extra transposes.
# We want dt to have d as the slowest moving dimension
# and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d)
dt, B, C = torch.split(
x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1
)
dt = self.dt_proj.weight @ dt.t()
dt = rearrange(dt, "d (b l) -> b d l", l=seqlen)
B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
y, last_state = selective_scan_fn(
x,
dt,
self.negA,
B,
C,
self.D.float(),
z=z,
delta_bias=self.dt_proj.bias.float(),
delta_softplus=True,
return_last_state=True,
)
y = rearrange(y, "b d l -> b l d")
attn_outputs = self.out_proj(y)
return attn_outputs, conv_state, last_state
def step(self, hidden_states, conv_state, ssm_state):
xz = self.in_proj(hidden_states.squeeze(1))
x, z = xz.chunk(2, dim=-1) # (B D)
x = causal_conv1d_update(
x,
conv_state,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
x_db = self.x_proj(x) # (B dt_rank+2*d_state)
dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1)
dt = F.linear(dt, self.dt_proj.weight)
A = self.negA
y = selective_state_update(
ssm_state,
x,
dt,
A,
B,
C,
self.D,
z=z,
dt_bias=self.dt_proj.bias,
dt_softplus=True,
)
out = self.out_proj(y)
return out.unsqueeze(1), conv_state.clone(), ssm_state.clone()
class ResidualBlock(nn.Module):
def __init__(self, prefix, config, weights, layer_id):
super().__init__()
self.mamba_block = MambaBlock(
prefix=f"{prefix}.mixer", config=config, weights=weights, layer_id=layer_id
)
self.layer_norm = FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_epsilon
)
def forward(
self,
hidden_states: torch.Tensor,
residual: Optional[torch.Tensor] = None,
inference_params: Optional[Any] = None,
):
residual = (hidden_states + residual) if residual is not None else hidden_states
shape = residual.shape
hidden_states, _ = self.layer_norm(residual.view(-1, shape[-1]))
hidden_states, conv_state, last_ssm_state = self.mamba_block(
hidden_states.view(*shape), inference_params
)
return hidden_states, residual, conv_state, last_ssm_state
class MambaModel(nn.Module):
def __init__(self, config, weights):
super().__init__()
prefix = "backbone"
try:
self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embeddings", weights)
except RuntimeError:
self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embedding", weights)
self.blocks = nn.ModuleList(
[
ResidualBlock(f"{prefix}.layers.{i}", config, weights, layer_id=i)
for i in range(config.n_layer)
]
)
self.norm_f = FastRMSNorm.load(
f"{prefix}.norm_f", weights, eps=config.layer_norm_epsilon
)
try:
self.lm_head = SpeculativeHead.load(config, f"{prefix}.embeddings", weights)
except RuntimeError:
self.lm_head = SpeculativeHead.load(config, f"{prefix}.embedding", weights)
self.config = config
def forward(
self, input_ids: torch.Tensor, inference_params=None, residual=None
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.embed_tokens(input_ids)
for i, block in enumerate(self.blocks):
hidden_states, residual, conv_state, ssm_state = block(
hidden_states, residual, inference_params
)
inference_params.conv_states[i].copy_(conv_state)
inference_params.ssm_states[i].copy_(ssm_state)
hidden_states = (
hidden_states + residual if residual is not None else hidden_states
)
hidden_states, _ = self.norm_f(hidden_states.view(-1, hidden_states.size(-1)))
hidden_states = hidden_states.view(residual.shape)
logits, speculative_logits = self.lm_head(hidden_states)
# update the offset for the next inference using these params
inference_params.seqlen_offset += input_ids.size(1)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/mamba_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/mamba_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 4238
} |
import torch
import numpy as np
from typing import Iterable, Optional, Tuple, List, Dict
from text_generation_server.pb.generate_pb2 import Request
from io import BytesIO
from PIL import Image
from dataclasses import dataclass, field
from opentelemetry import trace
from transformers import (
PreTrainedTokenizerBase,
)
from text_generation_server.models.vlm_causal_lm import VlmCausalLMBatch, VlmCausalLM
from text_generation_server.pb import generate_pb2
from text_generation_server.models.globals import PREFIX_CACHING, ATTENTION
from text_generation_server.layers.attention import Seqlen
from text_generation_server.models.metadata_kernels import block_tables_to_ragged
tracer = trace.get_tracer(__name__)
@dataclass
class MllamaCausalLMBatch(VlmCausalLMBatch):
    image_indices: List[int] = field(default_factory=list)
aspect_ratio_ids: Optional[torch.Tensor] = None
aspect_ratio_mask: Optional[torch.Tensor] = None
cross_attention_states: Optional[torch.Tensor] = None
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches):
batch = super().concatenate(batches)
batch.pixel_values = None
batch.pixel_attention_mask = None
offset = 0
image_indices = []
attention_states = []
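        # Shift each batch's image indices by the number of images accumulated from
        # earlier batches so they keep pointing into the rows of the concatenated
        # cross_attention_states tensor.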
for b in batches:
if b.cross_attention_states is not None:
attention_states.append(b.cross_attention_states)
image_indices.extend([i + offset for i in b.image_indices])
offset += len(b.image_indices)
if len(attention_states) > 0:
assert len(image_indices) > 0
batch.cross_attention_states = torch.cat(attention_states, dim=0)
batch.image_indices = image_indices
else:
batch.cross_attention_states = None
batch.image_indices = []
return batch
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]):
assert self.image_indices is not None
batch = super().filter(request_ids)
assert self.image_indices is not None
indices = []
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
indices.append(idx)
offset = 0
new_image_indices = []
prev_i = None
for i in self.image_indices:
if i in indices:
new_image_indices.append(offset)
if i != prev_i:
offset += 1
prev_i = i
batch.image_indices = new_image_indices
if len(new_image_indices) > 0:
assert max(new_image_indices) < self.cross_attention_states.shape[0]
assert offset <= self.cross_attention_states.shape[0]
batch.cross_attention_states = self.cross_attention_states[
new_image_indices
]
else:
batch.cross_attention_states = None
return batch
@classmethod
def batch_tokenized_inputs(
cls, requests: Iterable[Request], tokenizer, processor, config
):
image_inputs = []
texts = []
image_indices = []
batch_tokenized_inputs = []
for i, r in enumerate(requests):
# Each input is encoded into a list, where each element of this input list is either a string or a URL
curr_text = ""
curr_image = None
curr_i = None
for chunk in r.input_chunks.chunks:
chunk_type = chunk.WhichOneof("chunk")
if chunk_type == "text":
curr_text += chunk.text
elif chunk_type == "image":
image = Image.open(BytesIO(chunk.image.data))
# TODO unsure about BOS
curr_text += "<|image|>"
image_input = processor.image_processor(image, return_tensors="pt")
curr_image = image_input
curr_i = i
# image_inputs.append(image_input)
# image_indices.append(i)
else:
raise RuntimeError(f"Invalid chunk type {chunk_type}")
texts.append(curr_text)
if curr_image is not None:
image_inputs.append(curr_image)
image_indices.append(curr_i)
input_ids = tokenizer(
curr_text,
truncation=True,
max_length=r.truncate,
add_special_tokens=r.add_special_tokens,
)["input_ids"]
batch_tokenized_inputs.append(input_ids)
if image_inputs:
image_input = image_inputs[0]
new_image_inputs = {
"pixel_values": torch.cat(
[img["pixel_values"] for img in image_inputs], dim=0
),
}
if "aspect_ratio_ids" in image_input:
new_image_inputs["aspect_ratio_ids"] = torch.cat(
[img["aspect_ratio_ids"] for img in image_inputs], dim=0
)
if "aspect_ratio_mask" in image_input:
new_image_inputs["aspect_ratio_mask"] = torch.cat(
[img["aspect_ratio_mask"] for img in image_inputs], dim=0
)
image_inputs = new_image_inputs
image_inputs["image_indices"] = image_indices
else:
image_inputs = None
if image_inputs is not None:
assert len(image_indices) == image_inputs["pixel_values"].shape[0]
return batch_tokenized_inputs, image_inputs
@classmethod
def from_pb_processor(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
processor,
config,
dtype: torch.dtype,
device: torch.device,
) -> "VlmCausalLMBatch":
batch_tokenized_inputs, image_inputs = cls.batch_tokenized_inputs(
pb.requests, tokenizer, processor, config
)
batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
# XXX: <|image|> token is actually out of bounds and bugs out the logit processors.
batch.all_input_ids_tensor = batch.all_input_ids_tensor.clamp(
max=config.text_config.vocab_size - 1
)
if isinstance(batch.input_ids, list):
if len(batch) > 1:
input_ids = np.concatenate(batch.input_ids, dtype=np.int64)
else:
input_ids = batch.input_ids[0]
batch.input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
batch.input_ids = batch.input_ids.clamp(max=config.text_config.vocab_size - 1)
if image_inputs is not None:
batch.pixel_values = image_inputs["pixel_values"].to(
device=device, dtype=dtype
)
batch.aspect_ratio_ids = image_inputs["aspect_ratio_ids"].to(device=device)
batch.aspect_ratio_mask = image_inputs["aspect_ratio_mask"].to(
device=device
)
batch.image_indices = image_inputs["image_indices"]
else:
batch.pixel_values = None
batch.aspect_ratio_ids = None
batch.aspect_ratio_mask = None
batch.image_indices = []
assert batch.image_indices is not None
return batch
class MllamaCausalLM(VlmCausalLM):
def forward(
self,
batch: MllamaCausalLMBatch,
adapter_data: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
# Model Forward
if batch.speculative_ids is not None:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
max_s = batch.max_current_length
lm_head_indices = batch.prefill_head_indices
speculative_ids = batch.speculative_ids
B, speculative_length = speculative_ids.shape
new_length = speculative_length + 1
new_input_ids = torch.cat(
[input_ids.unsqueeze(-1), speculative_ids], dim=1
).reshape(-1)
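            # Each request is expanded to `new_length` rows (the verified token plus
            # its speculative candidates), so positions, slots and lengths below are
            # offset per row with the same arange.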
arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
arange_int = arange.to(dtype=torch.int32)
new_position_ids = (
position_ids.unsqueeze(-1).expand(B, new_length) + arange
).view(-1)
slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
input_lengths = (
input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int
).view(-1)
cache_lengths_tensor = (
batch.cache_lengths_tensor.unsqueeze(-1).expand(B, new_length)
).reshape(-1)
# Add Copy the block tables for all members
block_tables = (
block_tables.unsqueeze(1)
.expand(B, new_length, -1)
.reshape(B * new_length, -1)
.contiguous()
)
max_s = max_s + speculative_length
input_ids = new_input_ids
position_ids = new_position_ids
else:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
cache_lengths_tensor = batch.cache_lengths_tensor
max_s = batch.max_current_length
lm_head_indices = batch.prefill_head_indices
if cu_seqlen_prefill is None and self.max_past() is not None:
# In decode, not prefill, we're actually overwriting the KV-cache
# in a circular buffer mode.
# This makes sure the max_s for the decode pass is correct.
max_s = min(self.max_past(), max_s)
# Try to find an associated cuda graph
bs = input_ids.shape[0]
sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs])
if sorted_padded_bs:
# Get associated cuda graph
cuda_graph = self.cuda_graphs[sorted_padded_bs[0]]
else:
cuda_graph = None
if (
cu_seqlen_prefill is not None
or cuda_graph is None
# Only run cuda graphs when there's no images.
or batch.cross_attention_states is not None
):
if PREFIX_CACHING:
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
cache_lengths=batch.cache_lengths,
input_lengths_tensor=batch.input_lengths_tensor,
cache_lengths_tensor=batch.cache_lengths_tensor,
max_current_length=batch.max_current_length,
)
with self._forward_context(
block_tables=block_tables,
cu_seqlen_prefill=cu_seqlen_prefill,
input_lengths_tensor=input_lengths,
cache_lengths_tensor=cache_lengths_tensor,
):
seqlen = Seqlen(
input_lengths=input_lengths,
cache_lengths=cache_lengths_tensor,
cu_seqlen_q=cu_seqlen_prefill,
max_q=batch.max_input_length,
max_k=batch.max_current_length,
)
if batch.pixel_values is not None:
cross_attention_states = self.model.vision_forward(
pixel_values=batch.pixel_values,
aspect_ratio_ids=batch.aspect_ratio_ids,
aspect_ratio_mask=batch.aspect_ratio_mask,
)
batch.cross_attention_states = cross_attention_states
cross_attention_states = batch.cross_attention_states
logits, speculative_logits = self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
prefill_cache_indices=batch.prefill_cache_indices,
lm_head_indices=lm_head_indices,
cross_attention_states=cross_attention_states,
adapter_data=adapter_data,
image_indices=batch.image_indices[:],
)
if batch.prefill_cache_indices is not None:
batch.prefill_cache_indices = None
if batch.pixel_values is not None:
batch.pixel_values = None
return logits, speculative_logits
# Copy inputs to the static inputs of the cuda graph
# Static inputs are potentially padded
cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids
cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids
if ATTENTION == "flashinfer":
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
cache_lengths=batch.cache_lengths,
input_lengths_tensor=batch.input_lengths_tensor,
cache_lengths_tensor=batch.cache_lengths_tensor,
max_current_length=batch.max_current_length,
)
cuda_graph["block_tables"][: block_tables.shape[0]] = block_tables
else:
cuda_graph["block_tables"][
: block_tables.shape[0], : block_tables.shape[1]
] = block_tables
# XXX: This is working only because block 0 is reserved for the healthcheck
# so it doesn't matter if we override it with bogus values.
cuda_graph["slots"].fill_(0)
cuda_graph["slots"][: slots.shape[0]] = slots
cuda_graph["input_lengths"].zero_()
cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths
cuda_graph["cache_lengths"].zero_()
cuda_graph["cache_lengths"][
: cache_lengths_tensor.shape[0]
] = cache_lengths_tensor
with self._forward_context(
block_tables=cuda_graph["block_tables"],
cu_seqlen_prefill=None,
input_lengths_tensor=cuda_graph["input_lengths"],
cache_lengths_tensor=cuda_graph["cache_lengths"],
state=cuda_graph["state"],
):
# Replay the graph
cuda_graph["graph"].replay()
# Slice output to the correct shape
speculative_logits = (
cuda_graph["speculative_logits"][:bs]
if cuda_graph["speculative_logits"] is not None
else None
)
logits = cuda_graph["logits"][:bs]
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/mllama_causal_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/mllama_causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 7887
} |
import torch
from loguru import logger
import os
import importlib.util
def is_ipex_available():
return importlib.util.find_spec("intel_extension_for_pytorch") is not None
def get_cuda_free_memory(device, memory_fraction):
total_free_memory, _ = torch.cuda.mem_get_info(device)
total_gpu_memory = torch.cuda.get_device_properties(device).total_memory
free_memory = max(0, total_free_memory - (1 - memory_fraction) * total_gpu_memory)
return free_memory
def get_xpu_free_memory(device, memory_fraction):
total_memory = torch.xpu.get_device_properties(device).total_memory
device_id = device.index
memory_fraction = float(os.getenv("XPU_MEMORY_FRACTION", "1.0"))
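    # Keep a 10% safety margin on top of the requested fraction and subtract the
    # memory already reserved by the allocator on this device.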
free_memory = max(
0,
int(
total_memory * 0.9 * memory_fraction - torch.xpu.memory_reserved(device_id)
),
)
return free_memory
def get_cpu_free_memory(device, memory_fraction):
import psutil
from text_generation_server.utils.dist import WORLD_SIZE
mem = psutil.virtual_memory()
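    # Leave 5% headroom and split the available host RAM evenly across the shards.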
free_memory = int(mem.available * 0.95 / WORLD_SIZE)
return free_memory
def noop(*args, **kwargs):
pass
SYSTEM = None
if torch.version.hip is not None:
SYSTEM = "rocm"
empty_cache = torch.cuda.empty_cache
synchronize = torch.cuda.synchronize
get_free_memory = get_cuda_free_memory
elif torch.version.cuda is not None and torch.cuda.is_available():
SYSTEM = "cuda"
empty_cache = torch.cuda.empty_cache
synchronize = torch.cuda.synchronize
get_free_memory = get_cuda_free_memory
elif is_ipex_available():
SYSTEM = "ipex"
import intel_extension_for_pytorch # noqa: F401
if hasattr(torch, "xpu") and torch.xpu.is_available():
empty_cache = torch.xpu.empty_cache
synchronize = torch.xpu.synchronize
get_free_memory = get_xpu_free_memory
else:
empty_cache = noop
synchronize = noop
get_free_memory = get_cpu_free_memory
elif hasattr(torch, "xpu") and torch.xpu.is_available():
SYSTEM = "xpu"
empty_cache = torch.xpu.empty_cache
synchronize = torch.xpu.synchronize
get_free_memory = get_xpu_free_memory
else:
SYSTEM = "cpu"
empty_cache = noop
synchronize = noop
get_free_memory = get_cpu_free_memory
logger.info(f"Detected system {SYSTEM}")
| text-generation-inference/server/text_generation_server/utils/import_utils.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/import_utils.py",
"repo_id": "text-generation-inference",
"token_count": 932
} |
import subprocess
import argparse
import ast
import json
import os
TEMPLATE = """
# Supported Models
Text Generation Inference enables serving optimized models. The following sections list which models (VLMs & LLMs) are supported.
SUPPORTED_MODELS
If the above list lacks the model you would like to serve, then depending on the model's pipeline type, you can try to initialize and serve the model anyway to see how well it performs, but performance isn't guaranteed for non-optimized models:
```python
# for causal LMs/text-generation models
AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")
# or, for text-to-text generation models
AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")
```
If you wish to serve a supported model that already exists on a local folder, just point to the local folder.
```bash
text-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM>
```
"""
def check_cli(check: bool):
output = subprocess.check_output(["text-generation-launcher", "--help"]).decode(
"utf-8"
)
wrap_code_blocks_flag = "<!-- WRAP CODE BLOCKS -->"
final_doc = f"# Text-generation-launcher arguments\n\n{wrap_code_blocks_flag}\n\n"
lines = output.split("\n")
header = ""
block = []
for line in lines:
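        # An indented option line starts a new argument: flush the previous block,
        # then derive the section header from the `<...>` value placeholder or, for
        # flags without a value, from the flag name itself.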
        if line.startswith("  -") or line.startswith("      -"):
rendered_block = "\n".join(block)
if header:
final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n"
else:
final_doc += f"```shell\n{rendered_block}\n```\n"
block = []
tokens = line.split("<")
if len(tokens) > 1:
header = tokens[-1][:-1]
else:
header = line.split("--")[-1]
header = header.upper().replace("-", "_")
block.append(line)
rendered_block = "\n".join(block)
final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n"
block = []
filename = "docs/source/reference/launcher.md"
if check:
with open(filename, "r") as f:
doc = f.read()
if doc != final_doc:
tmp = "launcher.md"
with open(tmp, "w") as g:
g.write(final_doc)
diff = subprocess.run(
["diff", tmp, filename], capture_output=True
).stdout.decode("utf-8")
print(diff)
raise Exception(
"Cli arguments Doc is not up-to-date, run `python update_doc.py` in order to update it"
)
else:
with open(filename, "w") as f:
f.write(final_doc)
def check_supported_models(check: bool):
filename = "server/text_generation_server/models/__init__.py"
with open(filename, "r") as f:
tree = ast.parse(f.read())
enum_def = [
x for x in tree.body if isinstance(x, ast.ClassDef) and x.name == "ModelType"
][0]
_locals = {}
_globals = {}
exec(f"import enum\n{ast.unparse(enum_def)}", _globals, _locals)
ModelType = _locals["ModelType"]
list_string = ""
for data in ModelType:
list_string += f"- [{data.value['name']}]({data.value['url']})"
if data.value.get("multimodal", None):
list_string += " (Multimodal)"
list_string += "\n"
final_doc = TEMPLATE.replace("SUPPORTED_MODELS", list_string)
filename = "docs/source/supported_models.md"
if check:
with open(filename, "r") as f:
doc = f.read()
if doc != final_doc:
tmp = "supported.md"
with open(tmp, "w") as g:
g.write(final_doc)
diff = subprocess.run(
["diff", tmp, filename], capture_output=True
).stdout.decode("utf-8")
print(diff)
raise Exception(
"Supported models is not up-to-date, run `python update_doc.py` in order to update it"
)
else:
with open(filename, "w") as f:
f.write(final_doc)
def get_openapi_schema():
try:
output = subprocess.check_output(["text-generation-router", "print-schema"])
return json.loads(output)
except subprocess.CalledProcessError as e:
print(f"Error running text-generation-router print-schema: {e}")
raise SystemExit(1)
except json.JSONDecodeError:
print("Error: Invalid JSON received from text-generation-router print-schema")
raise SystemExit(1)
def check_openapi(check: bool):
new_openapi_data = get_openapi_schema()
filename = "docs/openapi.json"
tmp_filename = "openapi_tmp.json"
with open(tmp_filename, "w") as f:
json.dump(new_openapi_data, f, indent=2)
if check:
diff = subprocess.run(
[
"diff",
# allow for trailing whitespace since it's not significant
# and the precommit hook will remove it
"--ignore-trailing-space",
tmp_filename,
filename,
],
capture_output=True,
).stdout.decode("utf-8")
os.remove(tmp_filename)
if diff:
print(diff)
raise Exception(
"OpenAPI documentation is not up-to-date, run `python update_doc.py` in order to update it"
)
else:
os.rename(tmp_filename, filename)
print("OpenAPI documentation updated.")
p = subprocess.run(
[
"redocly",
# allow for trailing whitespace since it's not significant
# and the precommit hook will remove it
"lint",
"--skip-rule",
"security-defined",
filename,
],
capture_output=True,
)
errors = p.stderr.decode("utf-8")
    # The openapi spec fails on `exclusive_minimum`, which is expected to be a boolean
    # where utoipa outputs a value instead: https://github.com/juhaku/utoipa/issues/969
print(errors)
if p.returncode != 0:
print(errors)
raise Exception(
f"OpenAPI documentation is invalid, `redocly lint {filename}` showed some error:\n {errors}"
)
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
check_cli(args.check)
check_supported_models(args.check)
check_openapi(args.check)
if __name__ == "__main__":
main()
| text-generation-inference/update_doc.py/0 | {
"file_path": "text-generation-inference/update_doc.py",
"repo_id": "text-generation-inference",
"token_count": 2987
} |
.PHONY: style check-style test
DATA_DIR = data
dir_guard=@mkdir -p $(@D)
# Format source code automatically
style:
	npm run lint
# Check the source code is formatted correctly
check-style:
	npm run lint-check
TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json $(DATA_DIR)/tokenizer-wiki.json $(DATA_DIR)/bert-wiki.json
# Launch the test suite
test: $(TESTS_RESOURCES)
	npm run test
$(DATA_DIR)/big.txt :
	$(dir_guard)
	wget https://norvig.com/big.txt -O $@
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
	head -100 $(DATA_DIR)/big.txt > $@
$(DATA_DIR)/roberta.json :
	$(dir_guard)
	wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
$(DATA_DIR)/tokenizer-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json -O $@
$(DATA_DIR)/bert-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json -O $@
| tokenizers/bindings/node/Makefile/0 | {
"file_path": "tokenizers/bindings/node/Makefile",
"repo_id": "tokenizers",
"token_count": 406
} |
import {
byteLevelPreTokenizer,
metaspacePreTokenizer,
punctuationPreTokenizer,
sequencePreTokenizer,
splitPreTokenizer,
whitespaceSplitPreTokenizer,
} from '../../'
describe('byteLevelPreTokenizer', () => {
it('instantiates correctly', () => {
const processor = byteLevelPreTokenizer()
expect(processor.constructor.name).toEqual('PreTokenizer')
})
})
describe('metaspacePreTokenizer', () => {
it('instantiates correctly without any parameter', () => {
const processor = metaspacePreTokenizer()
expect(processor.constructor.name).toEqual('PreTokenizer')
})
it('accepts `undefined` as first parameter', () => {
expect(metaspacePreTokenizer(undefined)).toBeDefined()
})
it('accepts `undefined` as second parameter', () => {
expect(metaspacePreTokenizer('t', undefined)).toBeDefined()
})
it('can pre-tokenize strings', () => {
const pretok = metaspacePreTokenizer()
expect(pretok.preTokenizeString('Hello there friend')).toEqual([
['▁Hello', [0, 5]],
['▁there', [5, 11]],
['▁friend', [11, 18]],
])
})
})
describe('punctuationPreTokenizer', () => {
it('instantiates correctly without any parameter', () => {
const processor = punctuationPreTokenizer()
expect(processor.constructor.name).toEqual('PreTokenizer')
})
  it('instantiates correctly with non-default split delimiter', () => {
const processor = punctuationPreTokenizer('removed')
expect(processor.constructor.name).toEqual('PreTokenizer')
})
})
describe('splitPreTokenizer', () => {
it('instantiates correctly with invert parameter', () => {
const processor = splitPreTokenizer(' ', 'mergedWithPrevious', false)
expect(processor.constructor.name).toEqual('PreTokenizer')
})
})
describe('sequencePreTokenizer', () => {
it('instantiates correctly', () => {
const punctuation = punctuationPreTokenizer()
const whitespace = whitespaceSplitPreTokenizer()
const sequence2 = sequencePreTokenizer([])
expect(sequence2.constructor.name).toEqual('PreTokenizer')
const sequence3 = sequencePreTokenizer([punctuation, whitespace])
expect(sequence3.constructor.name).toEqual('PreTokenizer')
})
})
| tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts/0 | {
"file_path": "tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts",
"repo_id": "tokenizers",
"token_count": 728
} |
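The Node tests above exercise the same pre-tokenizers that are also exposed through the Python bindings. As an illustrative counterpart (not part of the test suite), a minimal Python sketch, assuming the `tokenizers` package is installed:

```python
from tokenizers.pre_tokenizers import Metaspace

# Metaspace replaces every space with the "▁" marker, just like the Node test expects
pretok = Metaspace()
print(pretok.pre_tokenize_str("Hello there friend"))
# [('▁Hello', (0, 5)), ('▁there', (5, 11)), ('▁friend', (11, 18))]
```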
{
"name": "tokenizers-linux-arm64-gnu",
"version": "0.13.4-rc1",
"os": [
"linux"
],
"cpu": [
"arm64"
],
"main": "tokenizers.linux-arm64-gnu.node",
"files": [
"tokenizers.linux-arm64-gnu.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers",
"libc": [
"glibc"
]
} | tokenizers/bindings/node/npm/linux-arm64-gnu/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/linux-arm64-gnu/package.json",
"repo_id": "tokenizers",
"token_count": 289
} |
use crate::arc_rwlock_serde;
use serde::{Deserialize, Serialize};
extern crate tokenizers as tk;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use std::sync::{Arc, RwLock};
use tk::decoders::DecoderWrapper;
/// Decoder
#[derive(Clone, Serialize, Deserialize)]
#[napi]
pub struct Decoder {
#[serde(flatten, with = "arc_rwlock_serde")]
decoder: Option<Arc<RwLock<DecoderWrapper>>>,
}
#[napi]
impl Decoder {
#[napi]
pub fn decode(&self, tokens: Vec<String>) -> Result<String> {
use tk::Decoder;
self
.decoder
.as_ref()
.unwrap()
.read()
.unwrap()
.decode(tokens)
.map_err(|e| Error::from_reason(format!("{}", e)))
}
}
impl tk::Decoder for Decoder {
fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> {
self
.decoder
.as_ref()
.ok_or("Uninitialized Decoder")?
.read()
.unwrap()
.decode_chain(tokens)
}
}
#[napi]
pub fn bpe_decoder(suffix: Option<String>) -> Decoder {
let suffix = suffix.unwrap_or("</w>".to_string());
let decoder = Some(Arc::new(RwLock::new(
tk::decoders::bpe::BPEDecoder::new(suffix).into(),
)));
Decoder { decoder }
}
#[napi]
pub fn byte_fallback_decoder() -> Decoder {
Decoder {
decoder: Some(Arc::new(RwLock::new(
tk::decoders::byte_fallback::ByteFallback::new().into(),
))),
}
}
#[napi]
pub fn ctc_decoder(
#[napi(ts_arg_type = "string = '<pad>'")] pad_token: Option<String>,
word_delimiter_token: Option<String>,
cleanup: Option<bool>,
) -> Decoder {
let pad_token = pad_token.unwrap_or("<pad>".to_string());
let word_delimiter_token = word_delimiter_token.unwrap_or("|".to_string());
let cleanup = cleanup.unwrap_or(true);
let decoder = Some(Arc::new(RwLock::new(
tk::decoders::ctc::CTC::new(pad_token, word_delimiter_token, cleanup).into(),
)));
Decoder { decoder }
}
#[napi]
pub fn fuse_decoder() -> Decoder {
Decoder {
decoder: Some(Arc::new(RwLock::new(
tk::decoders::fuse::Fuse::new().into(),
))),
}
}
#[napi]
pub fn metaspace_decoder(
#[napi(ts_arg_type = "string = '▁'")] replacement: Option<String>,
#[napi(ts_arg_type = "prepend_scheme = 'always'")] prepend_scheme: Option<String>,
#[napi(ts_arg_type = "split = true")] split: Option<bool>,
) -> Result<Decoder> {
use tk::pre_tokenizers::metaspace::PrependScheme;
let split = split.unwrap_or(true);
let replacement = replacement.unwrap_or("▁".to_string());
if replacement.chars().count() != 1 {
return Err(Error::from_reason(
"replacement is supposed to be a single char",
));
}
let replacement = replacement.chars().next().unwrap();
let prepend_scheme: PrependScheme =
match prepend_scheme.unwrap_or(String::from("always")).as_str() {
"always" => PrependScheme::Always,
"first" => PrependScheme::First,
"never" => PrependScheme::Never,
_ => {
return Err(Error::from_reason(
"prepend_scheme is supposed to be either 'always', 'first' or 'never'",
));
}
};
Ok(Decoder {
decoder: Some(Arc::new(RwLock::new(
tk::decoders::metaspace::Metaspace::new(replacement, prepend_scheme, split).into(),
))),
})
}
#[napi]
pub fn replace_decoder(pattern: String, content: String) -> Result<Decoder> {
Ok(Decoder {
decoder: Some(Arc::new(RwLock::new(
tk::normalizers::replace::Replace::new(pattern, content)
.map_err(|e| Error::from_reason(e.to_string()))?
.into(),
))),
})
}
#[napi]
pub fn sequence_decoder(decoders: Vec<&Decoder>) -> Decoder {
let sequence: Vec<tk::DecoderWrapper> = decoders
.into_iter()
.filter_map(|decoder| {
decoder
.decoder
.as_ref()
.map(|decoder| (**decoder).read().unwrap().clone())
})
.clone()
.collect();
Decoder {
decoder: Some(Arc::new(RwLock::new(tk::DecoderWrapper::Sequence(
tk::decoders::sequence::Sequence::new(sequence),
)))),
}
}
#[napi]
pub fn strip_decoder(content: String, left: u32, right: u32) -> Result<Decoder> {
let content: char = content.chars().next().ok_or(Error::from_reason(
"Expected non empty string for strip pattern",
))?;
Ok(Decoder {
decoder: Some(Arc::new(RwLock::new(
tk::decoders::strip::Strip::new(content, left as usize, right as usize).into(),
))),
})
}
#[napi]
pub fn word_piece_decoder(
#[napi(ts_arg_type = "string = '##'")] prefix: Option<String>,
#[napi(ts_arg_type = "bool = true")] cleanup: Option<bool>,
) -> Decoder {
let prefix = prefix.unwrap_or("##".to_string());
let cleanup = cleanup.unwrap_or(true);
Decoder {
decoder: Some(Arc::new(RwLock::new(
tk::decoders::wordpiece::WordPiece::new(prefix, cleanup).into(),
))),
}
}
| tokenizers/bindings/node/src/decoders.rs/0 | {
"file_path": "tokenizers/bindings/node/src/decoders.rs",
"repo_id": "tokenizers",
"token_count": 2038
} |
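For orientation, here is a rough Python sketch of how the decoders wrapped above are typically driven from the bindings. This is illustrative only; constructor names and defaults may differ slightly across `tokenizers` versions.

```python
from tokenizers import decoders

# WordPiece: "##" marks a continuation piece that gets glued back onto the previous token
wordpiece = decoders.WordPiece(prefix="##", cleanup=True)
print(wordpiece.decode(["he", "##llo", "world"]))  # "hello world"

# ByteLevel: reverses the GPT-2 style byte-to-unicode mapping ("Ġ" stands for a space)
byte_level = decoders.ByteLevel()
print(byte_level.decode(["Hello", "Ġworld"]))      # "Hello world"

# Decoders can be chained, mirroring `sequence_decoder` above
chained = decoders.Sequence([decoders.ByteFallback(), decoders.Fuse()])
```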
[target.x86_64-apple-darwin]
rustflags = [
"-C", "link-arg=-undefined",
"-C", "link-arg=dynamic_lookup",
"-C", "link-arg=-mmacosx-version-min=10.11",
]
[target.aarch64-apple-darwin]
rustflags = [
"-C", "link-arg=-undefined",
"-C", "link-arg=dynamic_lookup",
"-C", "link-arg=-mmacosx-version-min=10.11",
]
| tokenizers/bindings/python/.cargo/config.toml/0 | {
"file_path": "tokenizers/bindings/python/.cargo/config.toml",
"repo_id": "tokenizers",
"token_count": 146
} |
# Generated content DO NOT EDIT
class AddedToken:
"""
    Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
It can have special options that defines the way it should behave.
Args:
content (:obj:`str`): The content of the token
single_word (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should only match single words. If :obj:`True`, this
token will never match inside of a word. For example the token ``ing`` would match
on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
The notion of "`inside of a word`" is defined by the word boundaries pattern in
regular expressions (ie. the token should start and end with word boundaries).
lstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its left side.
If :obj:`True`, this token will greedily match any whitespace on its left. For
example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
rstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its right
side. If :obj:`True`, this token will greedily match any whitespace on its right.
It works just like :obj:`lstrip` but on the right.
normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
Defines whether this token should match against the normalized version of the input
text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
            lowercasing the text, the token could be extracted from the input ``"I saw a lion
Yesterday"``.
special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
Defines whether this token should be skipped when decoding.
"""
def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
pass
@property
def content(self):
"""
Get the content of this :obj:`AddedToken`
"""
pass
@property
def lstrip(self):
"""
Get the value of the :obj:`lstrip` option
"""
pass
@property
def normalized(self):
"""
Get the value of the :obj:`normalized` option
"""
pass
@property
def rstrip(self):
"""
Get the value of the :obj:`rstrip` option
"""
pass
@property
def single_word(self):
"""
Get the value of the :obj:`single_word` option
"""
pass
@property
def special(self):
"""
Get the value of the :obj:`special` option
"""
pass
class Encoding:
"""
The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
"""
@property
def attention_mask(self):
"""
The attention mask
This indicates to the LM which tokens should be attended to, and which should not.
        This is especially important when batching sequences, where we need to apply
padding.
Returns:
:obj:`List[int]`: The attention mask
"""
pass
def char_to_token(self, char_pos, sequence_index=0):
"""
Get the token that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the token that contains this char in the encoded sequence
"""
pass
def char_to_word(self, char_pos, sequence_index=0):
"""
Get the word that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the word that contains this char in the input sequence
"""
pass
@property
def ids(self):
"""
The generated IDs
The IDs are the main input to a Language Model. They are the token indices,
the numerical representations that a LM understands.
Returns:
:obj:`List[int]`: The list of IDs
"""
pass
@staticmethod
def merge(encodings, growing_offsets=True):
"""
Merge the list of encodings into one final :class:`~tokenizers.Encoding`
Args:
encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
The list of encodings that should be merged in one
growing_offsets (:obj:`bool`, defaults to :obj:`True`):
Whether the offsets should accumulate while merging
Returns:
:class:`~tokenizers.Encoding`: The resulting Encoding
"""
pass
@property
def n_sequences(self):
"""
The number of sequences represented
Returns:
:obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
"""
pass
@property
def offsets(self):
"""
The offsets associated to each token
        These offsets let you slice the input string, and thus retrieve the original
part that led to producing the corresponding token.
Returns:
A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
"""
pass
@property
def overflowing(self):
"""
A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
the output into as many pieces as required to match the specified maximum length.
This field lets you retrieve all the subsequent pieces.
When you use pairs of sequences, the overflowing pieces will contain enough
variations to cover all the possible combinations, while respecting the provided
maximum length.
"""
pass
def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
"""
Pad the :class:`~tokenizers.Encoding` at the given length
Args:
length (:obj:`int`):
The desired length
direction: (:obj:`str`, defaults to :obj:`right`):
The expected padding direction. Can be either :obj:`right` or :obj:`left`
pad_id (:obj:`int`, defaults to :obj:`0`):
The ID corresponding to the padding token
pad_type_id (:obj:`int`, defaults to :obj:`0`):
The type ID corresponding to the padding token
pad_token (:obj:`str`, defaults to `[PAD]`):
The pad token to use
"""
pass
@property
def sequence_ids(self):
"""
The generated sequence indices.
They represent the index of the input sequence associated to each token.
The sequence id can be None if the token is not related to any input sequence,
like for example with special tokens.
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
"""
pass
def set_sequence_id(self, sequence_id):
"""
Set the given sequence index
Set the given sequence index for the whole range of tokens contained in this
:class:`~tokenizers.Encoding`.
"""
pass
@property
def special_tokens_mask(self):
"""
The special token mask
This indicates which tokens are special tokens, and which are not.
Returns:
:obj:`List[int]`: The special tokens mask
"""
pass
def token_to_chars(self, token_index):
"""
Get the offsets of the token at the given index.
The returned offsets are related to the input sequence that contains the
token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
"""
pass
def token_to_sequence(self, token_index):
"""
Get the index of the sequence represented by the given token.
In the general use case, this method returns :obj:`0` for a single sequence or
the first sequence of a pair, and :obj:`1` for the second sequence of a pair
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The sequence id of the given token
"""
pass
def token_to_word(self, token_index):
"""
Get the index of the word that contains the token in one of the input sequences.
The returned word index is related to the input sequence that contains
the token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The index of the word in the relevant input sequence.
"""
pass
@property
def tokens(self):
"""
The generated tokens
They are the string representation of the IDs.
Returns:
:obj:`List[str]`: The list of tokens
"""
pass
def truncate(self, max_length, stride=0, direction="right"):
"""
Truncate the :class:`~tokenizers.Encoding` at the given length
If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
this information is lost. It will be considered as representing a single sequence.
Args:
max_length (:obj:`int`):
The desired length
stride (:obj:`int`, defaults to :obj:`0`):
The length of previous content to be included in each overflowing piece
direction (:obj:`str`, defaults to :obj:`right`):
Truncate direction
"""
pass
@property
def type_ids(self):
"""
The generated type IDs
Generally used for tasks like sequence classification or question answering,
        these tokens let the LM know which input sequence corresponds to each token.
Returns:
:obj:`List[int]`: The list of type ids
"""
pass
@property
def word_ids(self):
"""
The generated word indices.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the words indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
def word_to_chars(self, word_index, sequence_index=0):
"""
Get the offsets of the word at the given index in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
"""
pass
def word_to_tokens(self, word_index, sequence_index=0):
"""
Get the encoded tokens corresponding to the word at the given index
in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
"""
pass
@property
def words(self):
"""
The generated word indices.
.. warning::
This is deprecated and will be removed in a future version.
Please use :obj:`~tokenizers.Encoding.word_ids` instead.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the words indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
class NormalizedString:
"""
NormalizedString
A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
While making all the requested modifications, it keeps track of the alignment information
between the two versions of the string.
Args:
sequence: str:
The string sequence used to initialize this NormalizedString
"""
def append(self, s):
"""
Append the given sequence to the string
"""
pass
def clear(self):
"""
Clears the string
"""
pass
def filter(self, func):
"""
Filter each character of the string using the given func
"""
pass
def for_each(self, func):
"""
Calls the given function for each character of the string
"""
pass
def lowercase(self):
"""
Lowercase the string
"""
pass
def lstrip(self):
"""
Strip the left of the string
"""
pass
def map(self, func):
"""
Calls the given function for each character of the string
Replaces each character of the string using the returned value. Each
returned value **must** be a str of length 1 (ie a character).
"""
pass
def nfc(self):
"""
Runs the NFC normalization
"""
pass
def nfd(self):
"""
Runs the NFD normalization
"""
pass
def nfkc(self):
"""
Runs the NFKC normalization
"""
pass
def nfkd(self):
"""
Runs the NFKD normalization
"""
pass
@property
def normalized(self):
"""
The normalized part of the string
"""
pass
def prepend(self, s):
"""
Prepend the given sequence to the string
"""
pass
def replace(self, pattern, content):
"""
Replace the content of the given pattern with the provided content
Args:
pattern: Pattern:
A pattern used to match the string. Usually a string or a Regex
content: str:
The content to be used as replacement
"""
pass
def rstrip(self):
"""
Strip the right of the string
"""
pass
def slice(self, range):
"""
Slice the string using the given range
"""
pass
def split(self, pattern, behavior):
"""
Split the NormalizedString using the given pattern and the specified behavior
Args:
pattern: Pattern:
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
behavior: SplitDelimiterBehavior:
The behavior to use when splitting.
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
"contiguous"
Returns:
A list of NormalizedString, representing each split
"""
pass
def strip(self):
"""
Strip both ends of the string
"""
pass
def uppercase(self):
"""
Uppercase the string
"""
pass
class PreTokenizedString:
"""
PreTokenizedString
Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
underlying string, while keeping track of the alignment information (offsets).
The PreTokenizedString manages what we call `splits`. Each split represents a substring
which is a subpart of the original string, with the relevant offsets and tokens.
When calling one of the methods used to modify the PreTokenizedString (namely one of
    `split`, `normalize` or `tokenize`), only the `splits` that don't have any associated
tokens will get modified.
Args:
sequence: str:
The string sequence used to initialize this PreTokenizedString
"""
def __init__(self, sequence):
pass
def get_splits(self, offset_referential="original", offset_type="char"):
"""
Get the splits currently managed by the PreTokenizedString
Args:
offset_referential: :obj:`str`
Whether the returned splits should have offsets expressed relative
to the original string, or the normalized one. choices: "original", "normalized".
offset_type: :obj:`str`
Whether the returned splits should have offsets expressed in bytes or chars.
When slicing an str, we usually want to use chars, which is the default value.
Now in some cases it might be interesting to get these offsets expressed in bytes,
so it is possible to change this here.
choices: "char", "bytes"
Returns
A list of splits
"""
pass
def normalize(self, func):
"""
Normalize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[NormalizedString], None]:
The function used to normalize each underlying split. This function
does not need to return anything, just calling the methods on the provided
NormalizedString allow its modification.
"""
pass
def split(self, func):
"""
Split the PreTokenizedString using the given `func`
Args:
func: Callable[[index, NormalizedString], List[NormalizedString]]:
The function used to split each underlying split.
It is expected to return a list of `NormalizedString`, that represent the new
splits. If the given `NormalizedString` does not need any splitting, we can
just return it directly.
In order for the offsets to be tracked accurately, any returned `NormalizedString`
should come from calling either `.split` or `.slice` on the received one.
"""
pass
def to_encoding(self, type_id=0, word_idx=None):
"""
Return an Encoding generated from this PreTokenizedString
Args:
type_id: int = 0:
The type_id to be used on the generated Encoding.
word_idx: Optional[int] = None:
An optional word index to be used for each token of this Encoding. If provided,
all the word indices in the generated Encoding will use this value, instead
of the one automatically tracked during pre-tokenization.
Returns:
An Encoding
"""
pass
def tokenize(self, func):
"""
Tokenize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[str], List[Token]]:
The function used to tokenize each underlying split. This function must return
a list of Token generated from the input str.
"""
pass
class Regex:
"""
Instantiate a new Regex with the given pattern
"""
def __init__(self, pattern):
pass
class Token:
pass
class Tokenizer:
"""
A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
and outputs an :class:`~tokenizers.Encoding`.
Args:
model (:class:`~tokenizers.models.Model`):
The core algorithm that this :obj:`Tokenizer` should be using.
"""
def __init__(self, model):
pass
def add_special_tokens(self, tokens):
"""
Add the given special tokens to the Tokenizer.
        If these tokens are already part of the vocabulary, it just lets the Tokenizer know about
them. If they don't exist, the Tokenizer creates them, giving them a new id.
These special tokens will never be processed by the model (ie won't be split into
multiple tokens), and they can be removed from the output when decoding.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of special tokens we want to add to the vocabulary. Each token can either
be a string or an instance of :class:`~tokenizers.AddedToken` for more
customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def add_tokens(self, tokens):
"""
Add the given tokens to the vocabulary
The given tokens are added only if they don't already exist in the vocabulary.
        Each token is then attributed a new id.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of tokens we want to add to the vocabulary. Each token can be either a
string or an instance of :class:`~tokenizers.AddedToken` for more customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def decode(self, ids, skip_special_tokens=True):
"""
Decode the given list of ids back to a string
This is used to decode anything coming back from a Language Model
Args:
ids (A :obj:`List/Tuple` of :obj:`int`):
The list of ids that we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded string
Returns:
:obj:`str`: The decoded string
"""
pass
def decode_batch(self, sequences, skip_special_tokens=True):
"""
Decode a batch of ids back to their corresponding string
Args:
sequences (:obj:`List` of :obj:`List[int]`):
The batch of sequences we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded strings
Returns:
:obj:`List[str]`: A list of decoded strings
"""
pass
@property
def decoder(self):
"""
The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
"""
pass
def enable_padding(
self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
):
"""
Enable the padding
Args:
direction (:obj:`str`, `optional`, defaults to :obj:`right`):
The direction in which to pad. Can be either ``right`` or ``left``
pad_to_multiple_of (:obj:`int`, `optional`):
If specified, the padding length should always snap to the next multiple of the
                given value. For example, if we were going to pad with a length of 250 but
``pad_to_multiple_of=8`` then we will pad to 256.
pad_id (:obj:`int`, defaults to 0):
The id to be used when padding
pad_type_id (:obj:`int`, defaults to 0):
The type id to be used when padding
pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
The pad token to be used when padding
length (:obj:`int`, `optional`):
If specified, the length at which to pad. If not specified we pad using the size of
the longest sequence in a batch.
"""
pass
def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
"""
Enable truncation
Args:
max_length (:obj:`int`):
The max length at which to truncate
stride (:obj:`int`, `optional`):
                The length of the previous content to be included in each overflowing
                sequence
strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
                The strategy used for truncation. Can be one of ``longest_first``, ``only_first`` or
``only_second``.
direction (:obj:`str`, defaults to :obj:`right`):
Truncate direction
"""
pass
def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given sequence and pair. This method can process raw text sequences
as well as already pre-tokenized sequences.
Example:
Here are some examples of the inputs that are accepted::
                encode("A single sequence")
                encode("A sequence", "And its pair")
                encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)
encode(
[ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
is_pretokenized=True
)
Args:
sequence (:obj:`~tokenizers.InputSequence`):
The main input sequence we want to encode. This sequence can be either raw
text or pre-tokenized, according to the ``is_pretokenized`` argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
pair (:obj:`~tokenizers.InputSequence`, `optional`):
An optional input sequence. The expected format is the same that for ``sequence``.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The encoded result
"""
pass
def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given batch of inputs. This method accept both raw text sequences
        as well as already pre-tokenized sequences. The reason we use `PySequence` is
        that it allows zero-cost type checking (according to PyO3), since we don't
        have to convert the input in order to check it.
Example:
Here are some examples of the inputs that are accepted::
encode_batch([
"A single sequence",
("A tuple with a sequence", "And its pair"),
[ "A", "pre", "tokenized", "sequence" ],
([ "A", "pre", "tokenized", "sequence" ], "And its pair")
])
Args:
input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
A list of single sequences or pair sequences to encode. Each sequence
can be either raw text or pre-tokenized, according to the ``is_pretokenized``
argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
"""
pass
def encode_batch_fast(self, input, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given batch of inputs. This method is faster than `encode_batch`
        because it doesn't keep track of offsets; they will all be zero.
Example:
Here are some examples of the inputs that are accepted::
encode_batch_fast([
"A single sequence",
("A tuple with a sequence", "And its pair"),
[ "A", "pre", "tokenized", "sequence" ],
([ "A", "pre", "tokenized", "sequence" ], "And its pair")
])
Args:
input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
A list of single sequences or pair sequences to encode. Each sequence
can be either raw text or pre-tokenized, according to the ``is_pretokenized``
argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
"""
pass
@property
def encode_special_tokens(self):
"""
Modifies the tokenizer in order to use or not the special tokens
during encoding.
Args:
value (:obj:`bool`):
Whether to use the special tokens or not
"""
pass
@staticmethod
def from_buffer(buffer):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
Args:
buffer (:obj:`bytes`):
A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_file(path):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
Args:
path (:obj:`str`):
A path to a local JSON file representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_pretrained(identifier, revision="main", token=None):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
Hugging Face Hub.
Args:
identifier (:obj:`str`):
The identifier of a Model on the Hugging Face Hub, that contains
a tokenizer.json file
revision (:obj:`str`, defaults to `main`):
A branch or commit id
token (:obj:`str`, `optional`, defaults to `None`):
An optional auth token used to access private repositories on the
Hugging Face Hub
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_str(json):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
Args:
json (:obj:`str`):
A valid JSON string representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
def get_added_tokens_decoder(self):
"""
        Get the added tokens decoder, mapping each added token id to its :class:`~tokenizers.AddedToken`
        Returns:
            :obj:`Dict[int, AddedToken]`: The added tokens, keyed by their id
"""
pass
def get_vocab(self, with_added_tokens=True):
"""
Get the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`Dict[str, int]`: The vocabulary
"""
pass
def get_vocab_size(self, with_added_tokens=True):
"""
Get the size of the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`int`: The size of the vocabulary
"""
pass
def id_to_token(self, id):
"""
Convert the given id to its corresponding token if it exists
Args:
id (:obj:`int`):
The id to convert
Returns:
:obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
"""
pass
@property
def model(self):
"""
The :class:`~tokenizers.models.Model` in use by the Tokenizer
"""
pass
def no_padding(self):
"""
Disable padding
"""
pass
def no_truncation(self):
"""
Disable truncation
"""
pass
@property
def normalizer(self):
"""
The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
"""
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
:param is_pair: Boolean indicating if the input would be a single sentence or a pair
        :return: The number of special tokens that would be added
"""
pass
@property
def padding(self):
"""
Get the current padding parameters
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current padding parameters if padding is enabled
"""
pass
def post_process(self, encoding, pair=None, add_special_tokens=True):
"""
Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to the set truncation params (provided with
:meth:`~tokenizers.Tokenizer.enable_truncation`)
2. Apply the :class:`~tokenizers.processors.PostProcessor`
3. Pad according to the set padding params (provided with
:meth:`~tokenizers.Tokenizer.enable_padding`)
Args:
encoding (:class:`~tokenizers.Encoding`):
The :class:`~tokenizers.Encoding` corresponding to the main sequence.
pair (:class:`~tokenizers.Encoding`, `optional`):
An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The final post-processed encoding
"""
pass
@property
def post_processor(self):
"""
The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
"""
pass
@property
def pre_tokenizer(self):
"""
The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
"""
pass
def save(self, path, pretty=True):
"""
Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
Args:
path (:obj:`str`):
A path to a file in which to save the serialized tokenizer.
pretty (:obj:`bool`, defaults to :obj:`True`):
Whether the JSON file should be pretty formatted.
"""
pass
def to_str(self, pretty=False):
"""
Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
Args:
pretty (:obj:`bool`, defaults to :obj:`False`):
Whether the JSON string should be pretty formatted.
Returns:
:obj:`str`: A string representing the serialized Tokenizer
"""
pass
def token_to_id(self, token):
"""
Convert the given token to its corresponding id if it exists
Args:
token (:obj:`str`):
The token to convert
Returns:
:obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
"""
pass
def train(self, files, trainer=None):
"""
Train the Tokenizer using the given files.
Reads the files line by line, while keeping all the whitespace, even new lines.
        If you want to train from data stored in memory, you can check
:meth:`~tokenizers.Tokenizer.train_from_iterator`
Args:
files (:obj:`List[str]`):
                A list of paths to the files that we should use for training
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
"""
pass
def train_from_iterator(self, iterator, trainer=None, length=None):
"""
Train the Tokenizer using the provided iterator.
You can provide anything that is a Python Iterator
* A list of sequences :obj:`List[str]`
* A generator that yields :obj:`str` or :obj:`List[str]`
* A Numpy array of strings
* ...
Args:
iterator (:obj:`Iterator`):
Any iterator over strings or list of strings
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
pass
@property
def truncation(self):
"""
Get the currently set truncation parameters
        `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current truncation parameters if truncation is enabled
"""
pass
| tokenizers/bindings/python/py_src/tokenizers/__init__.pyi/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/__init__.pyi",
"repo_id": "tokenizers",
"token_count": 17247
} |
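To tie the stub above together, a short, hedged usage sketch of the documented API (assumes network access to the Hugging Face Hub for `from_pretrained`; any repo that ships a tokenizer.json works):

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")

# Padding and truncation map directly onto the methods documented above
tokenizer.enable_padding(pad_id=0, pad_token="[PAD]")
tokenizer.enable_truncation(max_length=32)

encoding = tokenizer.encode("My name is John", "And this is the pair")
print(encoding.tokens)          # string form of the ids
print(encoding.ids)             # model-ready input ids
print(encoding.type_ids)        # 0 for the first sequence, 1 for the pair
print(encoding.attention_mask)  # 1 for real tokens, 0 for padding

batch = tokenizer.encode_batch(["A single sequence", ("A sequence", "And its pair")])
print(tokenizer.decode(encoding.ids, skip_special_tokens=True))
```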
#![warn(clippy::all)]
#![allow(clippy::upper_case_acronyms)]
// Many false positives with pyo3 it seems &str, and &PyAny get flagged
#![allow(clippy::borrow_deref_ref)]
extern crate tokenizers as tk;
mod decoders;
mod encoding;
mod error;
mod models;
mod normalizers;
mod pre_tokenizers;
mod processors;
mod token;
mod tokenizer;
mod trainers;
mod utils;
use pyo3::prelude::*;
use pyo3::wrap_pymodule;
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
// For users using multiprocessing in python, it is quite easy to fork the process running
// tokenizers, ending up with a deadlock because we internally make use of multithreading. So
// we register a callback to be called in the event of a fork so that we can warn the user.
#[cfg(target_family = "unix")]
static mut REGISTERED_FORK_CALLBACK: bool = false;
#[cfg(target_family = "unix")]
extern "C" fn child_after_fork() {
use tk::parallelism::*;
if has_parallelism_been_used() && !is_parallelism_configured() {
eprintln!(
"huggingface/tokenizers: The current process just got forked, after parallelism has \
already been used. Disabling parallelism to avoid deadlocks..."
);
eprintln!("To disable this warning, you can either:");
eprintln!(
"\t- Avoid using `tokenizers` before the fork if possible\n\
\t- Explicitly set the environment variable {}=(true | false)",
ENV_VARIABLE
);
set_parallelism(false);
}
}
/// Tokenizers Module
#[pymodule]
pub fn tokenizers(m: &Bound<'_, PyModule>) -> PyResult<()> {
let _ = env_logger::try_init_from_env("TOKENIZERS_LOG");
// Register the fork callback
#[cfg(target_family = "unix")]
unsafe {
if !REGISTERED_FORK_CALLBACK {
libc::pthread_atfork(None, None, Some(child_after_fork));
REGISTERED_FORK_CALLBACK = true;
}
}
m.add_class::<tokenizer::PyTokenizer>()?;
m.add_class::<tokenizer::PyAddedToken>()?;
m.add_class::<token::PyToken>()?;
m.add_class::<encoding::PyEncoding>()?;
m.add_class::<utils::PyRegex>()?;
m.add_class::<utils::PyNormalizedString>()?;
m.add_class::<utils::PyPreTokenizedString>()?;
m.add_wrapped(wrap_pymodule!(models::models))?;
m.add_wrapped(wrap_pymodule!(pre_tokenizers::pre_tokenizers))?;
m.add_wrapped(wrap_pymodule!(decoders::decoders))?;
m.add_wrapped(wrap_pymodule!(processors::processors))?;
m.add_wrapped(wrap_pymodule!(normalizers::normalizers))?;
m.add_wrapped(wrap_pymodule!(trainers::trainers))?;
m.add("__version__", env!("CARGO_PKG_VERSION"))?;
Ok(())
}
| tokenizers/bindings/python/src/lib.rs/0 | {
"file_path": "tokenizers/bindings/python/src/lib.rs",
"repo_id": "tokenizers",
"token_count": 1086
} |
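The fork callback registered above is what prints the well-known parallelism warning. A minimal Python sketch of how users typically address it, assuming the default fork start method on Linux:

```python
import os

# Set this before the tokenizer is used for the first time; the value is read by the
# Rust side (ENV_VARIABLE in tk::parallelism) to decide whether intra-op threading
# stays enabled in forked children.
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # or "true" to keep parallelism

from multiprocessing import Pool
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")

def encode(text):
    return tokenizer.encode(text).ids

if __name__ == "__main__":
    with Pool(2) as pool:
        print(pool.map(encode, ["first text", "second text"]))
```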
from tokenizers import BertWordPieceTokenizer
from ..utils import bert_files, data_dir, multiprocessing_with_parallelism
class TestBertWordPieceTokenizer:
def test_basic_encode(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
# Encode with special tokens by default
output = tokenizer.encode("My name is John", "pair")
assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
assert output.tokens == [
"[CLS]",
"my",
"name",
"is",
"john",
"[SEP]",
"pair",
"[SEP]",
]
assert output.offsets == [
(0, 0),
(0, 2),
(3, 7),
(8, 10),
(11, 15),
(0, 0),
(0, 4),
(0, 0),
]
assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
# Can encode without the special tokens
output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
assert output.ids == [2026, 2171, 2003, 2198, 3940]
assert output.tokens == ["my", "name", "is", "john", "pair"]
assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
assert output.type_ids == [0, 0, 0, 0, 1]
def test_multiprocessing_with_parallelism(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = BertWordPieceTokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["a", "sentence"]
| tokenizers/bindings/python/tests/implementations/test_bert_wordpiece.py/0 | {
"file_path": "tokenizers/bindings/python/tests/implementations/test_bert_wordpiece.py",
"repo_id": "tokenizers",
"token_count": 914
} |
# Post-processors
<tokenizerslangcontent>
<python>
## BertProcessing
[[autodoc]] tokenizers.processors.BertProcessing
## ByteLevel
[[autodoc]] tokenizers.processors.ByteLevel
## RobertaProcessing
[[autodoc]] tokenizers.processors.RobertaProcessing
## TemplateProcessing
[[autodoc]] tokenizers.processors.TemplateProcessing
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/post-processors.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/post-processors.mdx",
"repo_id": "tokenizers",
"token_count": 174
} |
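Since the page above only lists autodoc entries, here is a small hedged Python example of the most flexible post-processor, `TemplateProcessing` (the token ids shown are the standard BERT ones and must match your own vocabulary):

```python
from tokenizers.processors import TemplateProcessing

# [CLS] A [SEP] for single sequences, [CLS] A [SEP] B [SEP] for pairs
post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 101), ("[SEP]", 102)],
)
# Attach it to a tokenizer with: tokenizer.post_processor = post_processor
```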
use tokenizers::Tokenizer;
fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let tokenizer = Tokenizer::from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct", None)?;
let data = std::fs::read_to_string("data/big.txt")?;
let data: Vec<_> = data.lines().collect();
let add_special_tokens = false;
tokenizer.encode_batch_char_offsets(data, add_special_tokens)?;
Ok(())
}
| tokenizers/tokenizers/examples/encode_batch.rs/0 | {
"file_path": "tokenizers/tokenizers/examples/encode_batch.rs",
"repo_id": "tokenizers",
"token_count": 165
} |
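A rough Python equivalent of the Rust example above (the Meta-Llama repo is gated, so an auth token may be required; any tokenizer.json-backed model behaves the same way, and `data/big.txt` is assumed to exist as in the Rust snippet):

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")

with open("data/big.txt", encoding="utf-8") as f:
    lines = f.read().splitlines()

# add_special_tokens=False mirrors the Rust call
encodings = tokenizer.encode_batch(lines, add_special_tokens=False)
print(len(encodings))
```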
use super::{super::OrderedVocabIter, convert_merges_to_hashmap, BpeBuilder, Pair, BPE};
use serde::{
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashMap;
impl Serialize for BPE {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("BPE", 8)?;
// Start by small fields
model.serialize_field("type", "BPE")?;
model.serialize_field("dropout", &self.dropout)?;
model.serialize_field("unk_token", &self.unk_token)?;
model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
model.serialize_field("end_of_word_suffix", &self.end_of_word_suffix)?;
model.serialize_field("fuse_unk", &self.fuse_unk)?;
model.serialize_field("byte_fallback", &self.byte_fallback)?;
model.serialize_field("ignore_merges", &self.ignore_merges)?;
// Then the large ones
let mut merges: Vec<(&Pair, &u32)> = self
.merges
.iter()
.map(|(pair, (rank, _))| (pair, rank))
.collect();
merges.sort_unstable_by_key(|k| *k.1);
let merges = merges
.into_iter()
.map(|(pair, _)| (self.vocab_r[&pair.0].clone(), self.vocab_r[&pair.1].clone()))
.collect::<Vec<_>>();
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("vocab", &ordered_vocab)?;
model.serialize_field("merges", &merges)?;
model.end()
}
}
impl<'de> Deserialize<'de> for BPE {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"BPE",
&[
"type",
"dropout",
"unk_token",
"continuing_subword_prefix",
"end_of_word_suffix",
"fuse_unk",
"byte_fallback",
"ignore_merges",
"vocab",
"merges",
],
BPEVisitor,
)
}
}
struct BPEVisitor;
impl<'de> Visitor<'de> for BPEVisitor {
type Value = BPE;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct BPE")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = BpeBuilder::new();
let mut vocab: Option<HashMap<String, u32>> = None;
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum MergeType {
Tuple(Vec<(String, String)>),
Legacy(Vec<String>),
}
let mut merges: Option<MergeType> = None;
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"dropout" => {
if let Some(dropout) = map.next_value()? {
builder = builder.dropout(dropout);
}
}
"unk_token" => {
if let Some(unk) = map.next_value()? {
builder = builder.unk_token(unk);
}
}
"continuing_subword_prefix" => {
if let Some(prefix) = map.next_value()? {
builder = builder.continuing_subword_prefix(prefix);
}
}
"end_of_word_suffix" => {
if let Some(suffix) = map.next_value()? {
builder = builder.end_of_word_suffix(suffix);
}
}
"fuse_unk" => {
if let Some(suffix) = map.next_value()? {
builder = builder.fuse_unk(suffix);
}
}
"byte_fallback" => {
if let Some(suffix) = map.next_value()? {
builder = builder.byte_fallback(suffix);
}
}
"ignore_merges" => {
if let Some(suffix) = map.next_value()? {
builder = builder.ignore_merges(suffix);
}
}
"vocab" => vocab = Some(map.next_value()?),
"merges" => merges = Some(map.next_value()?),
"type" => match map.next_value()? {
"BPE" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"BPE",
))
}
},
_ => {}
}
}
if let (Some(vocab), Some(merges)) = (vocab, merges) {
let merges = match merges {
MergeType::Tuple(merges) => merges,
MergeType::Legacy(merges) => {
convert_merges_to_hashmap(merges.into_iter(), &vocab).map_err(Error::custom)?
}
};
builder = builder.vocab_and_merges(vocab, merges);
Ok(builder.build().map_err(Error::custom)?)
} else {
Err(Error::custom("Missing vocab/merges"))
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::models::bpe::Vocab;
#[test]
fn test_serialization() {
let vocab: Vocab = [
("<unk>".into(), 0),
("a".into(), 1),
("b".into(), 2),
("ab".into(), 3),
]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![("a".to_string(), "b".to_string())])
.unk_token("<unk>".to_string())
.ignore_merges(true)
.build()
.unwrap();
let legacy = r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":["a b"]}"#;
let legacy = serde_json::from_str(legacy).unwrap();
assert_eq!(bpe, legacy);
let data = serde_json::to_string(&bpe).unwrap();
assert_eq!(
data,
r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":[["a","b"]]}"#
);
let reconstructed = serde_json::from_str(&data).unwrap();
assert_eq!(bpe, reconstructed);
// With a space in the token
let vocab: Vocab = [
("<unk>".into(), 0),
("a".into(), 1),
("b c d".into(), 2),
("ab c d".into(), 3),
]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![("a".to_string(), "b c d".to_string())])
.unk_token("<unk>".to_string())
.ignore_merges(true)
.build()
.unwrap();
let data = serde_json::to_string(&bpe).unwrap();
assert_eq!(
data,
r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b c d":2,"ab c d":3},"merges":[["a","b c d"]]}"#
);
let reconstructed = serde_json::from_str(&data).unwrap();
assert_eq!(bpe, reconstructed);
}
#[test]
fn test_serialization_ignore_merges() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let mut bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.ignore_merges(true)
.build()
.unwrap();
let bpe_string = r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2},"merges":[]}"#;
assert_eq!(serde_json::from_str::<BPE>(bpe_string).unwrap(), bpe);
bpe.ignore_merges = false;
let bpe_string = r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"vocab":{"<unk>":0,"a":1,"b":2},"merges":[]}"#;
assert_eq!(serde_json::from_str::<BPE>(bpe_string).unwrap(), bpe);
}
}
| tokenizers/tokenizers/src/models/bpe/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/bpe/serialization.rs",
"repo_id": "tokenizers",
"token_count": 4848
} |
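The serde round-trips tested above are what back `Tokenizer.to_str` / `from_str` in the Python bindings. A small illustrative sketch using the same toy vocabulary as the Rust test:

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE

vocab = {"<unk>": 0, "a": 1, "b": 2, "ab": 3}
merges = [("a", "b")]
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, unk_token="<unk>"))

data = tokenizer.to_str()            # JSON, including the "merges" entries serialized above
restored = Tokenizer.from_str(data)  # goes through the Deserialize impl shown here
assert restored.encode("ab").tokens == ["ab"]
```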
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use serde::{Deserialize, Serialize};
use unicode_categories::UnicodeCategories;
/// Checks whether a character is whitespace
fn is_whitespace(c: char) -> bool {
// These are technically control characters but we count them as whitespace
match c {
'\t' | '\n' | '\r' => true,
_ => c.is_whitespace(),
}
}
/// Checks whether a character is a control character
fn is_control(c: char) -> bool {
// These are technically control characters but we count them as whitespace
match c {
'\t' | '\n' | '\r' => false,
// The definition of `is_control` here is quite large and contains also
// Cc, Cf, Cn or Co
// cf. https://unicode.org/reports/tr44/ (Table 12)
_ => c.is_other(),
}
}
/// Checks whether a character is chinese
/// This defines a "chinese character" as anything in the CJK Unicode block:
/// https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
///
/// Note that the CJK Unicode block is NOT all Japanese and Korean characters,
/// despite its name. The modern Korean Hangul alphabet is a different block,
/// as is Japanese Hiragana and Katakana. Those alphabets are used to write
/// space-separated words, so they are not treated specially and handled
/// like for all of the other languages.
fn is_chinese_char(c: char) -> bool {
matches!(
c as usize,
0x4E00..=0x9FFF |
0x3400..=0x4DBF |
0x20000..=0x2A6DF |
0x2A700..=0x2B73F |
0x2B740..=0x2B81F |
0x2B920..=0x2CEAF |
0xF900..=0xFAFF |
0x2F800..=0x2FA1F
)
}
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[non_exhaustive]
pub struct BertNormalizer {
/// Whether to do the bert basic cleaning:
/// 1. Remove any control characters
/// 2. Replace all sorts of whitespace by the classic one ` `
pub clean_text: bool,
/// Whether to put spaces around chinese characters so they get split
pub handle_chinese_chars: bool,
/// Whether to strip accents
pub strip_accents: Option<bool>,
/// Whether to lowercase the input
pub lowercase: bool,
}
impl Default for BertNormalizer {
fn default() -> Self {
Self {
clean_text: true,
handle_chinese_chars: true,
strip_accents: None,
lowercase: true,
}
}
}
impl BertNormalizer {
pub fn new(
clean_text: bool,
handle_chinese_chars: bool,
strip_accents: Option<bool>,
lowercase: bool,
) -> Self {
Self {
clean_text,
handle_chinese_chars,
strip_accents,
lowercase,
}
}
fn do_clean_text(&self, normalized: &mut NormalizedString) {
normalized
.filter(|c| !(c as usize == 0 || c as usize == 0xfffd || is_control(c)))
.map(|c| if is_whitespace(c) { ' ' } else { c });
}
fn do_handle_chinese_chars(&self, normalized: &mut NormalizedString) {
let mut new_chars: Vec<(char, isize)> = vec![];
normalized.for_each(|c| {
if is_chinese_char(c) {
new_chars.extend([(' ', 0), (c, 1), (' ', 1)]);
} else {
new_chars.push((c, 0));
}
});
normalized.transform(new_chars, 0);
}
fn do_strip_accents(&self, normalized: &mut NormalizedString) {
normalized.nfd().filter(|c| !c.is_mark_nonspacing());
}
fn do_lowercase(&self, normalized: &mut NormalizedString) {
normalized.lowercase();
}
}
impl Normalizer for BertNormalizer {
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
if self.clean_text {
self.do_clean_text(normalized);
}
if self.handle_chinese_chars {
self.do_handle_chinese_chars(normalized);
}
let strip_accents = self.strip_accents.unwrap_or(self.lowercase);
if strip_accents {
self.do_strip_accents(normalized);
}
if self.lowercase {
self.do_lowercase(normalized);
}
Ok(())
}
}
| tokenizers/tokenizers/src/normalizers/bert.rs/0 | {
"file_path": "tokenizers/tokenizers/src/normalizers/bert.rs",
"repo_id": "tokenizers",
"token_count": 1856
} |
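The same normalizer is exposed to Python; a brief hedged sketch of its observable behaviour (the outputs shown follow directly from the rules implemented above):

```python
from tokenizers.normalizers import BertNormalizer

normalizer = BertNormalizer(
    clean_text=True,
    handle_chinese_chars=True,
    strip_accents=None,   # None falls back to the `lowercase` setting, as in the Rust code
    lowercase=True,
)

print(normalizer.normalize_str("Héllò hôw are ü?"))  # "hello how are u?"
print(normalizer.normalize_str("野口"))              # " 野  口 " – CJK chars get padded with spaces
```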
use crate::pre_tokenizers::PreTokenizerWrapper;
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
pretokenizers: Vec<PreTokenizerWrapper>,
}
impl Sequence {
pub fn new(pretokenizers: Vec<PreTokenizerWrapper>) -> Self {
Self { pretokenizers }
}
}
impl AsRef<[PreTokenizerWrapper]> for Sequence {
fn as_ref(&self) -> &[PreTokenizerWrapper] {
&self.pretokenizers
}
}
impl AsMut<[PreTokenizerWrapper]> for Sequence {
fn as_mut(&mut self) -> &mut [PreTokenizerWrapper] {
&mut self.pretokenizers
}
}
impl IntoIterator for Sequence {
type Item = PreTokenizerWrapper;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.pretokenizers.into_iter()
}
}
impl PreTokenizer for Sequence {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
for pretokenizer in &self.pretokenizers {
pretokenizer.pre_tokenize(pretokenized)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pre_tokenizers::{punctuation::Punctuation, whitespace::WhitespaceSplit};
use crate::{OffsetReferential, OffsetType};
#[test]
fn sequence_basic() {
let pretokenizers = vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit),
PreTokenizerWrapper::Punctuation(Punctuation::default()),
];
let pretok = Sequence::new(pretokenizers);
        let mut pretokenized: PreTokenizedString = "Hey friend!     How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey", (0, 3)),
("friend", (4, 10)),
("!", (10, 11)),
("How", (16, 19)),
("are", (20, 23)),
("you", (24, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/sequence.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/sequence.rs",
"repo_id": "tokenizers",
"token_count": 1124
} |
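The equivalent composition is available from Python; a minimal sketch mirroring the Rust test above:

```python
from tokenizers.pre_tokenizers import Punctuation, Sequence, WhitespaceSplit

pre_tokenizer = Sequence([WhitespaceSplit(), Punctuation()])
print(pre_tokenizer.pre_tokenize_str("Hey friend!     How are you?!?"))
# [('Hey', (0, 3)), ('friend', (4, 10)), ('!', (10, 11)), ('How', (16, 19)),
#  ('are', (20, 23)), ('you', (24, 27)), ('?', (27, 28)), ('!', (28, 29)), ('?', (29, 30))]
```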
use crate::{
normalizer::Range, Encoding, NormalizedString, OffsetReferential, Offsets, Result, Token,
};
use std::collections::HashMap;
/// Various possible types of offsets
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OffsetType {
Byte,
Char,
None,
}
/// Wrapper for a subpart of a `NormalizedString`.
///
/// This Split contains the underlying `NormalizedString` as well as its offsets
/// in the original string. These offsets are in the `original` referential.
/// It also contains any `Token` associated to the current split
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Split {
/// The underlying `NormalizedString`. Each SubString is represented by a `NormalizedString`
/// and in the end we might be carrying a lot of SubString representing various parts of the
/// original input string.
normalized: NormalizedString,
/// Optional Tokens associated to this Split
tokens: Option<Vec<Token>>,
}
impl From<NormalizedString> for Split {
fn from(n: NormalizedString) -> Self {
Self {
normalized: n,
tokens: None,
}
}
}
impl From<(NormalizedString, Option<Vec<Token>>)> for Split {
fn from(f: (NormalizedString, Option<Vec<Token>>)) -> Self {
Self {
normalized: f.0,
tokens: f.1,
}
}
}
/// The `PreTokenizedString` is in charge of splitting an underlying string,
/// making sure everything is fine while doing so, and providing ways to normalize
/// and tokenize these splits.
/// Once everything has been normalized and tokenized, the `PreTokenizedString` is able
/// to build an `Encoding` with all the relevant offsets and word ids, relative to the
/// original string.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PreTokenizedString {
original: String,
splits: Vec<Split>,
}
impl PreTokenizedString {
/// Split the `PreTokenizedString` by providing a `split_fn` in charge of splitting
/// each substring (`NormalizedString`) into multiple parts.
///
/// `split_fn` takes a `NormalizedString` and is in charge of returning an iterator
    /// over the produced `NormalizedString`. `split_fn` is free to modify these
    /// `NormalizedString` as relevant, as long as it respects the constraint stated below.
    ///
    /// There is only one constraint that *MUST* be respected:
/// > The produced `NormalizedString`, if combined back together, must have the
/// > same `original` string as the original one given to `split_fn`. This concretely
/// > means that for the offset tracking to work as expected, `split_fn` must produce
/// > "splits" of the original string.
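    /// For example, when given `"Hello world"`, producing the slices `"Hello"`, `" "` and
    /// `"world"` satisfies this constraint, since they recombine into the exact original
    /// string.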
pub fn split<F, U, R>(&mut self, mut split_fn: F) -> Result<()>
where
F: FnMut(usize, NormalizedString) -> Result<U>,
U: IntoIterator<Item = R>,
R: Into<Split>,
{
// new_splits is at least as big as self.splits
let mut new_splits = Vec::with_capacity(self.splits.len());
for (i, original_split) in self.splits.drain(..).enumerate() {
if original_split.tokens.is_some() {
new_splits.push(original_split);
continue;
}
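            // Otherwise, replace this split by whatever `split_fn` produces,
            // dropping any empty pieces along the way.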
new_splits.extend(
split_fn(i, original_split.normalized)?
.into_iter()
.filter_map(|split| {
let split: Split = split.into();
if split.normalized.is_empty() {
None
} else {
Some(split)
}
}),
);
}
self.splits = new_splits;
Ok(())
}
    /// Normalize all the splits that do not have attached `Tokens`, using the provided
/// `normalize` function.
pub fn normalize<F>(&mut self, normalize: F) -> Result<()>
where
F: Fn(&mut NormalizedString) -> Result<()>,
{
for split in self.splits.iter_mut().filter(|s| s.tokens.is_none()) {
normalize(&mut split.normalized)?;
}
Ok(())
}
/// Tokenize all the splits that do not have attached `Tokens`, using the provided
/// `tokenize` function
pub fn tokenize<F>(&mut self, tokenize: F) -> Result<()>
where
F: Fn(&NormalizedString) -> Result<Vec<Token>>,
{
for split in self.splits.iter_mut().filter(|s| s.tokens.is_none()) {
split.tokens = Some(tokenize(&split.normalized)?);
}
Ok(())
}
/// Transform the current `PreTokenizedString` into an `Encoding`.
///
/// If a `word_idx` is provided, any word in the generated `Encoding`
/// will be set to this value. This is generally used with pre-tokenized
    /// input that does not need the `PreTokenizedString` to generate word ids.
///
/// This method will fail if some splits do not have associated `Token`.
pub fn into_encoding(
self,
word_idx: Option<u32>,
type_id: u32,
offset_type: OffsetType,
) -> Result<Encoding> {
if self.splits.is_empty() {
Ok(Encoding::default())
} else if !self.splits.iter().all(|split| split.tokens.is_some()) {
Err("Split has not been tokenized, call `PreTokenizedString::tokenize` first".into())
} else {
let offset_converter = match offset_type {
OffsetType::Char => Some(BytesToCharOffsetConverter::new(&self.original)),
OffsetType::Byte => None,
OffsetType::None => {
let tokens = self
.splits
.into_iter()
.flat_map(|split| {
split.tokens.unwrap().into_iter().map(|token| {
// Replace this with the actual fields you need for the Encoding type
(token.id, String::with_capacity(0), (0, 0), None, 0)
})
})
.collect();
return Ok(tokens);
}
};
Ok(self
.splits
.into_iter()
.enumerate()
.flat_map(|(idx, split)| {
let normalized = split.normalized;
let offsets = normalized.offsets_original();
let offset_converter = &offset_converter;
split.tokens.unwrap().into_iter().map(move |token| {
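                        // Token offsets are relative to this split's normalized string;
                        // project them back into the original string's referential.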
let mut offsets = normalized
.convert_offsets(Range::Normalized(token.offsets.0..token.offsets.1))
.map_or(token.offsets, |range| {
(offsets.0 + range.start, offsets.0 + range.end)
});
// Convert to char offsets if relevant
if let Some(converter) = offset_converter {
offsets = converter.convert(offsets).unwrap_or(offsets);
}
(
token.id,
token.value,
offsets,
if word_idx.is_some() {
word_idx
} else {
Some(idx as u32)
},
type_id,
)
})
})
.collect())
}
}
    /// Returns a list of splits, each of them being a slice of the normalized
    /// string, the associated offsets in either the original or normalized
    /// referential, as well as the potential tokens.
pub fn get_splits(
&self,
offset_ref: OffsetReferential,
offset_type: OffsetType,
) -> Vec<(&str, Offsets, &Option<Vec<Token>>)> {
let offset_converter = match offset_type {
OffsetType::Char => Some(BytesToCharOffsetConverter::new(&self.original)),
OffsetType::Byte => None,
OffsetType::None => None,
};
let mut offset = 0;
self.splits
.iter()
.map(|split| {
let mut offsets = match offset_ref {
OffsetReferential::Original => split.normalized.offsets_original(),
OffsetReferential::Normalized => {
let len = split.normalized.len();
offset += len;
(offset - len, offset)
}
};
// Convert to char offsets if relevant
if let Some(ref converter) = offset_converter {
offsets = converter.convert(offsets).unwrap_or(offsets);
}
(split.normalized.get(), offsets, &split.tokens)
})
.collect()
}
}
impl From<NormalizedString> for PreTokenizedString {
fn from(s: NormalizedString) -> Self {
Self {
original: s.get_original().to_owned(),
splits: vec![Split {
normalized: s,
tokens: None,
}],
}
}
}
impl From<&str> for PreTokenizedString {
fn from(s: &str) -> Self {
let normalized: NormalizedString = s.into();
normalized.into()
}
}
impl From<String> for PreTokenizedString {
fn from(s: String) -> Self {
let normalized: NormalizedString = s.into();
normalized.into()
}
}
struct BytesToCharOffsetConverter {
map: HashMap<usize, usize>,
}
impl BytesToCharOffsetConverter {
pub fn new(sequence: &str) -> Self {
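        // Build a byte-index -> char-index map: every byte belonging to a character
        // maps to that character's index, so byte-based offsets can later be
        // converted to char-based offsets in `convert`.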
Self {
map: sequence
.char_indices()
.enumerate()
.flat_map(|(i, (b, c))| {
let mut n = 0;
std::iter::repeat_with(move || {
let o = (b + n, i);
n += 1;
o
})
.take(c.len_utf8())
})
.collect(),
}
}
pub fn convert(&self, offsets: Offsets) -> Option<Offsets> {
match (self.map.get(&offsets.0), self.map.get(&offsets.1)) {
(Some(start), Some(end)) => Some((*start, *end)),
// If we reached the end, `end` is not in the map
(Some(start), None) => {
// But the one just before should be
let last = self.map.get(&(offsets.1 - 1)).copied().unwrap_or(start + 1);
Some((*start, last + 1))
}
_ => None,
}
}
}
| tokenizers/tokenizers/src/tokenizer/pre_tokenizer.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/pre_tokenizer.rs",
"repo_id": "tokenizers",
"token_count": 5310
} |
mod common;
use common::*;
use tokenizers::tokenizer::AddedToken;
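// Assert that the token at position `$offset` in `$output` maps back to `$result`
// when its reported offsets are used to slice the original `$input`.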
macro_rules! check_offsets {
($input: expr, $output:expr, $offset:expr, $result:expr) => {
let offsets = $output.get_offsets()[$offset];
assert_eq!(&$input[offsets.0..offsets.1], $result);
};
}
#[test]
fn byte_level_basic() {
// Without trimming offsets
let tokenizer = get_byte_level(true, false);
let input = "Hello there, how are you?";
let output = tokenizer.encode(input, false).unwrap();
check_offsets!(input, output, 0, "Hello");
check_offsets!(input, output, 1, " there");
check_offsets!(input, output, 2, ",");
check_offsets!(input, output, 3, " how");
check_offsets!(input, output, 4, " are");
check_offsets!(input, output, 5, " you");
check_offsets!(input, output, 6, "?");
// And when trimming offsets:
let tokenizer = get_byte_level(true, true);
let input = "Hello there, how are you?";
let output = tokenizer.encode(input, false).unwrap();
check_offsets!(input, output, 0, "Hello");
check_offsets!(input, output, 1, "there");
check_offsets!(input, output, 2, ",");
check_offsets!(input, output, 3, "how");
check_offsets!(input, output, 4, "are");
check_offsets!(input, output, 5, "you");
check_offsets!(input, output, 6, "?");
}
#[test]
fn byte_level_unicode() {
let tokenizer = get_byte_level(true, false);
let input = "i⭢j";
let output = tokenizer.encode(input, false).unwrap();
check_offsets!(input, output, 1, "⭢");
check_offsets!(input, output, 2, "⭢");
check_offsets!(input, output, 3, "⭢");
}
#[test]
fn byte_level_double_sequence() {
let input_a = "My name is Anthony";
let input_b = "What is my name?";
// Without trimming offsets
let tokenizer = get_byte_level(true, false);
let output = tokenizer.encode((input_a, input_b), false).unwrap();
let offsets = output.get_offsets();
assert_eq!(
offsets,
&[
(0, 2),
(2, 7),
(7, 10),
(10, 18),
(0, 4),
(4, 7),
(7, 10),
(10, 15),
(15, 16)
]
);
assert_eq!(
output.get_word_ids(),
&[
Some(0),
Some(1),
Some(2),
Some(3),
Some(0),
Some(1),
Some(2),
Some(3),
Some(4)
]
);
assert_eq!(output.get_type_ids(), &[0, 0, 0, 0, 1, 1, 1, 1, 1]);
// When trimming offsets
let tokenizer = get_byte_level(true, true);
let output = tokenizer.encode((input_a, input_b), false).unwrap();
let offsets = output.get_offsets();
assert_eq!(
offsets,
&[
(0, 2),
(3, 7),
(8, 10),
(11, 18),
(0, 4),
(5, 7),
(8, 10),
(11, 15),
(15, 16)
]
);
}
#[test]
fn byte_level_pre_tokenized_sequence() {
let input = ["My", "name", "is", "Anthonino"];
// Without trimming offsets
let tokenizer = get_byte_level(true, false);
let output = tokenizer.encode(&input[..], false).unwrap();
assert_eq!(
output.get_tokens(),
&["ĠMy", "Ġname", "Ġis", "ĠAnth", "on", "ino"]
);
assert_eq!(
output.get_word_ids(),
&[Some(0), Some(1), Some(2), Some(3), Some(3), Some(3)]
);
assert_eq!(
output.get_offsets(),
&[(0, 2), (0, 4), (0, 2), (0, 4), (4, 6), (6, 9)]
);
}
#[test]
#[ignore]
fn byte_level_pre_tokenized_sequence_with_trimming() {
let input = ["My", "name", "is", "Anthonino"];
// When trimming offsets (expect same result)
let tokenizer = get_byte_level(true, true);
let output = tokenizer.encode(&input[..], false).unwrap();
assert_eq!(
output.get_word_ids(),
&[Some(0), Some(1), Some(2), Some(3), Some(3), Some(3)]
);
assert_eq!(
output.get_offsets(),
&[(0, 2), (0, 4), (0, 2), (0, 4), (4, 6), (6, 9)]
);
}
#[test]
fn split_on_added_tokens_bert() {
let input = "Yesterday I saw a [MASK] far away";
let mut tokenizer = get_bert();
tokenizer.add_special_tokens(&[AddedToken::from("[MASK]", true)]);
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(
output.get_offsets(),
&[
(0, 9),
(10, 11),
(12, 15),
(16, 17),
(18, 24),
(25, 28),
(29, 33)
]
);
assert_eq!(
output.get_tokens(),
&["yesterday", "i", "saw", "a", "[MASK]", "far", "away"]
);
assert_eq!(
output.get_word_ids(),
&[
Some(0),
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6)
]
);
}
| tokenizers/tokenizers/tests/offsets.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/offsets.rs",
"repo_id": "tokenizers",
"token_count": 2497
} |
{
"overrides": [
{
"files": ["tests/**/*.js"],
"options": {
"printWidth": 10000000
}
}
]
}
| transformers.js/.prettierrc/0 | {
"file_path": "transformers.js/.prettierrc",
"repo_id": "transformers.js",
"token_count": 108
} |
import './style.css';
import * as THREE from 'three';
import { OrbitControls } from 'three/addons/controls/OrbitControls.js';
import { pipeline, env, RawImage } from '@xenova/transformers';
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;
// Proxy the WASM backend to prevent the UI from freezing
env.backends.onnx.wasm.proxy = true;
// Constants
const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/bread_small.png';
const DEFAULT_SCALE = 0.75;
// Reference the elements that we will need
const status = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');
// Create a new depth-estimation pipeline
status.textContent = 'Loading model...';
const depth_estimator = await pipeline('depth-estimation', 'Xenova/depth-anything-small-hf');
status.textContent = 'Ready';
example.addEventListener('click', (e) => {
e.preventDefault();
predict(EXAMPLE_URL);
});
fileUpload.addEventListener('change', function (e) {
const file = e.target.files[0];
if (!file) {
return;
}
const reader = new FileReader();
// Set up a callback when the file is loaded
reader.onload = e2 => predict(e2.target.result);
reader.readAsDataURL(file);
});
let onSliderChange;
// Predict depth map for the given image
async function predict(url) {
imageContainer.innerHTML = '';
const image = await RawImage.fromURL(url);
// Set up scene and slider controls
const { canvas, setDisplacementMap } = setupScene(url, image.width, image.height);
imageContainer.append(canvas);
status.textContent = 'Analysing...';
const { depth } = await depth_estimator(image);
setDisplacementMap(depth.toCanvas());
status.textContent = '';
// Add slider control
const slider = document.createElement('input');
slider.type = 'range';
slider.min = 0;
slider.max = 1;
slider.step = 0.01;
slider.addEventListener('input', (e) => {
onSliderChange(parseFloat(e.target.value));
});
slider.defaultValue = DEFAULT_SCALE;
imageContainer.append(slider);
}
function setupScene(url, w, h) {
// Create new scene
const canvas = document.createElement('canvas');
const width = canvas.width = imageContainer.offsetWidth;
const height = canvas.height = imageContainer.offsetHeight;
const scene = new THREE.Scene();
// Create camera and add it to the scene
const camera = new THREE.PerspectiveCamera(30, width / height, 0.01, 10);
camera.position.z = 2;
scene.add(camera);
const renderer = new THREE.WebGLRenderer({ canvas, antialias: true });
renderer.setSize(width, height);
renderer.setPixelRatio(window.devicePixelRatio);
// Add ambient light
const light = new THREE.AmbientLight(0xffffff, 2);
scene.add(light);
    // Load the input image as the color texture (the depth map is applied later as a displacement map)
const image = new THREE.TextureLoader().load(url);
image.colorSpace = THREE.SRGBColorSpace;
const material = new THREE.MeshStandardMaterial({
map: image,
side: THREE.DoubleSide,
});
material.displacementScale = DEFAULT_SCALE;
const setDisplacementMap = (canvas) => {
material.displacementMap = new THREE.CanvasTexture(canvas);
material.needsUpdate = true;
}
const setDisplacementScale = (scale) => {
material.displacementScale = scale;
material.needsUpdate = true;
}
onSliderChange = setDisplacementScale;
// Create plane and rescale it so that max(w, h) = 1
const [pw, ph] = w > h ? [1, h / w] : [w / h, 1];
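    // Use one geometry segment per image pixel so the displacement map can offset
    // each vertex according to the predicted depth.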
const geometry = new THREE.PlaneGeometry(pw, ph, w, h);
const plane = new THREE.Mesh(geometry, material);
scene.add(plane);
// Add orbit controls
const controls = new OrbitControls(camera, renderer.domElement);
controls.enableDamping = true;
renderer.setAnimationLoop(() => {
renderer.render(scene, camera);
controls.update();
});
window.addEventListener('resize', () => {
const width = imageContainer.offsetWidth;
const height = imageContainer.offsetHeight;
camera.aspect = width / height;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}, false);
return {
canvas: renderer.domElement,
setDisplacementMap,
};
}
| transformers.js/examples/depth-anything-client/main.js/0 | {
"file_path": "transformers.js/examples/depth-anything-client/main.js",
"repo_id": "transformers.js",
"token_count": 1584
} |
* {
box-sizing: border-box;
padding: 0;
margin: 0;
font-family: sans-serif;
}
html,
body {
height: 100%;
}
body {
padding: 16px 32px;
}
body,
#container,
#upload-button {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
}
h1,
h4 {
text-align: center;
}
h4 {
margin-top: 0.5rem;
}
#container {
position: relative;
width: 720px;
height: 480px;
max-width: 100%;
max-height: 100%;
border: 2px dashed #D1D5DB;
border-radius: 0.75rem;
overflow: hidden;
margin-top: 1rem;
background-size: 100% 100%;
background-position: center;
background-repeat: no-repeat;
}
#upload-button {
gap: 0.4rem;
font-size: 18px;
cursor: pointer;
}
#upload {
display: none;
}
svg {
pointer-events: none;
}
#example {
font-size: 14px;
text-decoration: underline;
cursor: pointer;
}
#example:hover {
color: #2563EB;
}
canvas {
position: absolute;
width: 100%;
height: 100%;
}
#status {
min-height: 16px;
margin: 8px 0;
} | transformers.js/examples/remove-background-client/style.css/0 | {
"file_path": "transformers.js/examples/remove-background-client/style.css",
"repo_id": "transformers.js",
"token_count": 422
} |
import { env, AutoTokenizer, ClapTextModelWithProjection } from '@xenova/transformers';
import { getCachedFile } from './utils';
// Skip local model check
env.allowLocalModels = false;
class ApplicationSingleton {
static model_id = 'Xenova/larger_clap_music_and_speech';
static BASE_URL = 'https://huggingface.co/datasets/Xenova/MusicBenchEmbedded/resolve/main/';
static tokenizer = null;
static text_model = null;
static embeddings = null;
static async getInstance(progress_callback = null) {
this.tokenizer ??= AutoTokenizer.from_pretrained(this.model_id, { progress_callback });
this.text_model ??= ClapTextModelWithProjection.from_pretrained(this.model_id, {
progress_callback,
quantized: true, // TODO allow user to select quantized or not
});
this.embeddings ??= new Promise(
(resolve, reject) => {
getCachedFile(this.BASE_URL + 'audio-embeddings_52768-512_32bit.bin')
.then((buffer) => {
resolve(new Float32Array(buffer));
})
.catch(reject);
}
);
return Promise.all([this.tokenizer, this.text_model, this.embeddings]);
}
}
function cosineSimilarity(query_embeds, database_embeds) {
const EMBED_DIM = 512;
const numDB = database_embeds.length / EMBED_DIM;
const similarityScores = new Array(numDB);
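    // For each database vector, compute cosine similarity:
    // dot(query, db) / (||query|| * ||db||), reading the flattened embedding
    // matrix in EMBED_DIM-sized chunks.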
for (let i = 0; i < numDB; ++i) {
const startOffset = i * EMBED_DIM;
const dbVector = database_embeds.slice(startOffset, startOffset + EMBED_DIM);
let dotProduct = 0;
let normEmbeds = 0;
let normDB = 0;
for (let j = 0; j < EMBED_DIM; ++j) {
const embedValue = query_embeds[j];
const dbValue = dbVector[j];
dotProduct += embedValue * dbValue;
normEmbeds += embedValue * embedValue;
normDB += dbValue * dbValue;
}
similarityScores[i] = dotProduct / (Math.sqrt(normEmbeds) * Math.sqrt(normDB));
}
return similarityScores;
}
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Get the tokenizer, model, and embeddings. When called for the first time,
// this will load the files and cache them for future use.
const [tokenizer, text_model, embeddings] = await ApplicationSingleton.getInstance(self.postMessage);
// Send the output back to the main thread
self.postMessage({ status: 'ready' });
// Run tokenization
const text_inputs = tokenizer(event.data.query, { padding: true, truncation: true });
// Compute embeddings
const { text_embeds } = await text_model(text_inputs);
// Compute similarity scores
const scores = cosineSimilarity(text_embeds.data, embeddings);
const output = scores
.map((score, i) => [score, i]) // Save index
.sort((a, b) => b[0] - a[0]) // Sort by scores
.slice(0, 100); // Get top 100 results
// Send the output back to the main thread
self.postMessage({
status: 'complete',
output: output,
});
});
| transformers.js/examples/semantic-audio-search/worker.js/0 | {
"file_path": "transformers.js/examples/semantic-audio-search/worker.js",
"repo_id": "transformers.js",
"token_count": 1294
} |
import './globals.css'
import { Inter } from 'next/font/google'
const inter = Inter({ subsets: ['latin'] })
export const metadata = {
title: 'In-browser Semantic Image Search',
description: 'Search for images using text (built w/ Transformers.js)',
}
export default function RootLayout({ children }) {
return (
<html lang="en">
<body className={inter.className}>{children}</body>
</html>
)
}
| transformers.js/examples/semantic-image-search-client/src/app/layout.js/0 | {
"file_path": "transformers.js/examples/semantic-image-search-client/src/app/layout.js",
"repo_id": "transformers.js",
"token_count": 139
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js - Text-to-speech demo</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>
| transformers.js/examples/text-to-speech-client/index.html/0 | {
"file_path": "transformers.js/examples/text-to-speech-client/index.html",
"repo_id": "transformers.js",
"token_count": 136
} |
{
"name": "webgpu-video-depth-estimation",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"devDependencies": {
"vite": "^5.2.0"
},
"dependencies": {
"@xenova/transformers": "github:xenova/transformers.js#v3"
}
}
| transformers.js/examples/webgpu-video-depth-estimation/package.json/0 | {
"file_path": "transformers.js/examples/webgpu-video-depth-estimation/package.json",
"repo_id": "transformers.js",
"token_count": 157
} |
export default function BotIcon(props) {
return (
<svg
{...props}
xmlns="http://www.w3.org/2000/svg"
width="24"
height="24"
viewBox="0 0 24 24"
fill="none"
stroke="currentColor"
strokeWidth="2"
strokeLinecap="round"
strokeLinejoin="round"
>
<path d="M12 8V4H8" />
<rect width="16" height="12" x="4" y="8" rx="2" />
<path d="M2 14h2" />
<path d="M20 14h2" />
<path d="M15 13v2" />
<path d="M9 13v2" />
</svg>
)
} | transformers.js/examples/webgpu-vlm/src/components/icons/BotIcon.jsx/0 | {
"file_path": "transformers.js/examples/webgpu-vlm/src/components/icons/BotIcon.jsx",
"repo_id": "transformers.js",
"token_count": 392
} |