Dataset columns:
    text     : string (lengths 96 to 319k)
    id       : string (lengths 14 to 178)
    metadata : dict
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.models.opt.modeling_opt import OPTAttention from transformers.testing_utils import ( apply_skip_if_not_implemented, is_bitsandbytes_available, is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu_if_bnb_not_multi_backend_enabled, require_torch_multi_gpu, slow, torch_device, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc elif model.config.model_type == "opt": try: return model.decoder.layers[0].fc1 except AttributeError: # for AutoModelforCausalLM return model.model.decoder.layers[0].fc1 elif model.config.model_type == "llama": return model.model.layers[0].mlp.gate_proj else: return model.transformer.h[0].mlp.dense_4h_to_h if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only""" def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) if is_bitsandbytes_available(): import bitsandbytes as bnb @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow class Base4bitTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 2.109659552692574 # This was obtained on a RTX Titan so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University") EXPECTED_OUTPUTS.add("Hello my name is John and I am 25 years old.") MAX_NEW_TOKENS = 10 def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) @apply_skip_if_not_implemented class Bnb4BitTest(Base4bitTest): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_quantization_num_parameters(self): r""" Test if the number of returned parameters is correct See: https://github.com/huggingface/transformers/issues/25978 """ num_params_4bit = self.model_4bit.num_parameters() num_params_fp16 = self.model_fp16.num_parameters() self.assertEqual(num_params_4bit, num_params_fp16) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_4bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE, delta=1e-5) linear = get_some_linear_layer(self.model_4bit) self.assertTrue(linear.weight.__class__ == Params4bit) def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.model_4bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_4bit.config._pre_quantization_dtype == torch.float16) def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uint8) def test_rwkv_4bit(self): r""" A simple test to check if 4-bit RWKV inference works as expected. 
""" model_id = "RWKV/rwkv-4-169m-pile" quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) tok = AutoTokenizer.from_pretrained(model_id) text = "Hello my name is" input_ids = tok.encode(text, return_tensors="pt").to(torch_device) _ = model.generate(input_ids, max_new_tokens=30) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_4bit.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_4bit = True model_4bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_4bit_from_config.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_dequantize(self): r""" Test that loading the model and unquantize it produce correct results """ bnb_config = BitsAndBytesConfig(load_in_4bit=True) model_4bit = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) model_4bit.dequantize() encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_4bit.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_device_assignment(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"): self.skipTest(reason="This test requires bitsandbytes >= 0.43.2") mem_before = self.model_4bit.get_memory_footprint() # Move to CPU self.model_4bit.to("cpu") self.assertEqual(self.model_4bit.device.type, "cpu") self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before) if torch.cuda.is_available(): # Move back to CUDA device self.model_4bit.to("cuda") self.assertEqual(self.model_4bit.device.type, "cuda") self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before) def test_device_and_dtype_assignment(self): r""" Test whether attempting to change the device or cast the dtype of a model after converting it to 4-bit precision will raise an appropriate error. The test ensures that such operations are prohibited on 4-bit models to prevent invalid conversions. """ # Moving with `to` or `cuda` is not supported with versions < 0.43.2. 
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"): with self.assertRaises(ValueError): # Tries with `str` self.model_4bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `device` self.model_4bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): # Tries with `cuda` self.model_4bit.cuda() with self.assertRaises(ValueError): # Tries with a `dtype` self.model_4bit.to(torch.float16) with self.assertRaises(ValueError): # Tries to cast the 4-bit model to float32 using `float()` self.model_4bit.float() with self.assertRaises(ValueError): # Tries to cast the 4-bit model to float16 using `half()` self.model_4bit.half() # Test if we did not break anything self.model_4bit.to(torch.device(torch_device)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) if torch.cuda.is_available(): # Check that this does not throw an error _ = self.model_fp16.cuda() # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_4bit_conversion(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small", load_in_4bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_bnb_4bit_wrong_config(self): r""" Test whether creating a bnb config with unsupported values leads to errors. """ with self.assertRaises(ValueError): _ = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_storage="add") @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow @apply_skip_if_not_implemented class Bnb4BitT5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "google-t5/t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. 
""" from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) @apply_skip_if_not_implemented class Classes4BitModelTest(Base4bitTest): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "google-t5/t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_4bit=True, device_map="auto" ) # CausalLM model self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_4bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_4bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Params4bit self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit) # Other heads should be nn.Parameter self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) @apply_skip_if_not_implemented class Pipeline4BitTest(Base4bitTest): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ if hasattr(self, "pipe"): del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed 4bit is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={ "device_map": "auto", "load_in_4bit": True, # float16 isn't supported on CPU, use bfloat16 instead "torch_dtype": torch.bfloat16 if torch_device == "cpu" else torch.float16, }, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_gpu @apply_skip_if_not_implemented class Bnb4bitTestMultiGpu(Base4bitTest): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_4bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @apply_skip_if_not_implemented class Bnb4BitTestTraining(Base4bitTest): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): self.skipTest(reason="This test requires bitsandbytes >= 0.37.0") # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True) if torch.cuda.is_available(): self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) elif torch.xpu.is_available(): self.assertEqual(set(model.hf_device_map.values()), {f"xpu:{torch.xpu.current_device()}"}) else: self.assertTrue(all(param.device.type == "cpu" for param in model.parameters())) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in model.named_modules(): if isinstance(module, OPTAttention): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(torch_device) # Step 4: Check if the gradient is not None with torch.autocast(torch_device): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) @apply_skip_if_not_implemented @unittest.skipIf(torch_device == "xpu", reason="XPU has precision issue on gpt model, will test it once fixed") class Bnb4BitGPT2Test(Bnb4BitTest): model_name = "openai-community/gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187 @apply_skip_if_not_implemented class Bnb4BitLlamaTest(Bnb4BitTest): model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" EXPECTED_RELATIVE_DIFFERENCE = 2.9461410686392764 @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow @apply_skip_if_not_implemented class BaseSerializationTest(unittest.TestCase): model_name = "facebook/opt-125m" input_text = "Mars colonists' favorite meals are" def tearDown(self): gc.collect() torch.cuda.empty_cache() def test_serialization(self, quant_type="nf4", double_quant=True, safe_serialization=True): r""" Test whether it is possible to serialize a model in 4-bit. Uses most typical params as default. See ExtendedSerializationTest class for more params combinations. 
""" tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type=quant_type, bnb_4bit_use_double_quant=double_quant, bnb_4bit_compute_dtype=torch.bfloat16, ) model_0 = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=self.quantization_config, device_map=torch_device, ) with tempfile.TemporaryDirectory() as tmpdirname: model_0.save_pretrained(tmpdirname, safe_serialization=safe_serialization) config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_1 = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device) # checking quantized linear module weight linear = get_some_linear_layer(model_1) self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit) self.assertTrue(hasattr(linear.weight, "quant_state")) self.assertTrue(linear.weight.quant_state.__class__ == bnb.functional.QuantState) # checking memory footpring self.assertAlmostEqual(model_0.get_memory_footprint() / model_1.get_memory_footprint(), 1, places=2) # Matching all parameters and their quant_state items: d0 = dict(model_0.named_parameters()) d1 = dict(model_1.named_parameters()) self.assertTrue(d0.keys() == d1.keys()) for k in d0.keys(): self.assertTrue(d0[k].shape == d1[k].shape) self.assertTrue(d0[k].device.type == d1[k].device.type) self.assertTrue(d0[k].device == d1[k].device) self.assertTrue(d0[k].dtype == d1[k].dtype) self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device))) if isinstance(d0[k], bnb.nn.modules.Params4bit): for v0, v1 in zip( d0[k].quant_state.as_dict().values(), d1[k].quant_state.as_dict().values(), ): if isinstance(v0, torch.Tensor): # The absmax will not be saved in the quant_state when using NF4 in CPU if v0.numel() != 0: self.assertTrue(torch.equal(v0, v1.to(v0.device))) else: self.assertTrue(v0 == v1) # comparing forward() outputs encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device) out_0 = model_0(**encoded_input) out_1 = model_1(**encoded_input) torch.testing.assert_close(out_0["logits"], out_1["logits"], rtol=0.05, atol=0.05) # comparing generate() outputs encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device) output_sequences_0 = model_0.generate(**encoded_input, max_new_tokens=10) output_sequences_1 = model_1.generate(**encoded_input, max_new_tokens=10) def _decode(token): return tokenizer.decode(token, skip_special_tokens=True) self.assertEqual( [_decode(x) for x in output_sequences_0], [_decode(x) for x in output_sequences_1], ) @apply_skip_if_not_implemented class ExtendedSerializationTest(BaseSerializationTest): """ tests more combinations of parameters """ def test_nf4_single_unsafe(self): self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=False) def test_nf4_single_safe(self): self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=True) def test_nf4_double_unsafe(self): self.test_serialization(quant_type="nf4", double_quant=True, safe_serialization=False) # nf4 double safetensors quantization is tested in test_serialization() method from the parent class def test_fp4_single_unsafe(self): self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=False) def test_fp4_single_safe(self): self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=True) def test_fp4_double_unsafe(self): self.test_serialization(quant_type="fp4", double_quant=True, 
safe_serialization=False) def test_fp4_double_safe(self): self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=True) class BloomSerializationTest(BaseSerializationTest): """ default BaseSerializationTest config tested with Bloom family model """ model_name = "bigscience/bloom-560m" class GPTSerializationTest(BaseSerializationTest): """ default BaseSerializationTest config tested with GPT family model """ model_name = "openai-community/gpt2-xl" class LlamaSerializationTest(BaseSerializationTest): """ default BaseSerializationTest config tested with Llama family model """ model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" @require_bitsandbytes @require_accelerate @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow @apply_skip_if_not_implemented class Bnb4BitTestBasicConfigTest(unittest.TestCase): def test_load_in_4_and_8_bit_fails(self): with self.assertRaisesRegex(ValueError, "load_in_4bit and load_in_8bit are both True"): AutoModelForCausalLM.from_pretrained("facebook/opt-125m", load_in_4bit=True, load_in_8bit=True) def test_set_load_in_8_bit(self): quantization_config = BitsAndBytesConfig(load_in_4bit=True) with self.assertRaisesRegex(ValueError, "load_in_4bit and load_in_8bit are both True"): quantization_config.load_in_8bit = True
transformers/tests/quantization/bnb/test_4bit.py/0
{ "file_path": "transformers/tests/quantization/bnb/test_4bit.py", "repo_id": "transformers", "token_count": 13017 }
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import AutoModelForCausalLM, AutoTokenizer, HqqConfig from transformers.testing_utils import ( require_accelerate, require_hqq, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_hqq_available, is_torch_available if is_torch_available(): import torch if is_hqq_available(): from hqq.core.quantize import HQQBackend, HQQLinear class HQQLLMRunner: def __init__(self, model_id, quant_config, compute_dtype, device, cache_dir=None): self.model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=compute_dtype, device_map=device, quantization_config=quant_config, low_cpu_mem_usage=True, cache_dir=cache_dir, ) self.tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=cache_dir) self.device = self.model.device HQQLinear.set_backend(HQQBackend.PYTORCH) def cleanup(): torch.cuda.empty_cache() gc.collect() def check_hqqlayer(test_module, hqq_layer, batch_size=1, context_size=1024): # Test HQQ layer W_dequant = hqq_layer.dequantize() # Reconstructed weights inputs = ( torch.randn( (batch_size, context_size, hqq_layer.meta["shape"][1]), device=hqq_layer.device, dtype=hqq_layer.compute_dtype, ) / 10.0 ) with torch.no_grad(): outputs = hqq_layer(inputs) test_module.assertEqual(outputs.shape[-1], W_dequant.shape[0]) test_module.assertEqual(outputs.dtype, hqq_layer.compute_dtype) del W_dequant, inputs, outputs cleanup() def check_forward(test_module, model, batch_size=1, context_size=1024): # Test forward pass with torch.no_grad(): out = model(torch.zeros([batch_size, context_size], device=model.device, dtype=torch.int32)).logits test_module.assertEqual(out.shape[0], batch_size) test_module.assertEqual(out.shape[1], context_size) cleanup() MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" @require_torch_gpu @require_hqq class HqqConfigTest(unittest.TestCase): def test_to_dict(self): """ Makes sure the config format is properly set """ quantization_config = HqqConfig() hqq_orig_config = quantization_config.to_dict() self.assertEqual(quantization_config.quant_config, hqq_orig_config["quant_config"]) @slow @require_torch_gpu @require_accelerate @require_hqq class HQQTest(unittest.TestCase): def tearDown(self): cleanup() def test_fp16_quantized_model(self): """ Simple LLM model testing fp16 """ quant_config = HqqConfig(nbits=8, group_size=64) hqq_runner = HQQLLMRunner( model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device ) check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj) check_forward(self, hqq_runner.model) @slow @require_torch_gpu @require_torch_multi_gpu @require_accelerate @require_hqq class HQQTestMultiGPU(unittest.TestCase): def tearDown(self): cleanup() def test_fp16_quantized_model_multipgpu(self): """ Simple LLM model testing fp16 with multi-gpu """ quant_config = HqqConfig(nbits=8, group_size=64) hqq_runner = HQQLLMRunner( 
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device="auto" ) check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj) check_forward(self, hqq_runner.model) @slow @require_torch_gpu @require_accelerate @require_hqq class HQQSerializationTest(unittest.TestCase): def tearDown(self): cleanup() def test_model_serialization(self): """ Simple HQQ LLM save/load test """ quant_config = HqqConfig(nbits=4, group_size=64) hqq_runner = HQQLLMRunner( model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device ) input_tensor = torch.zeros((1, 8), dtype=torch.int32, device=torch_device) with torch.no_grad(): logits_ref = hqq_runner.model.forward(input_tensor).logits # Save saved_model_id = "quant_model" hqq_runner.model.save_pretrained(saved_model_id) # Remove old model del hqq_runner.model torch.cuda.empty_cache() # Load and check if the logits match model_loaded = AutoModelForCausalLM.from_pretrained( "quant_model", torch_dtype=torch.float16, device_map=torch_device, low_cpu_mem_usage=True ) with torch.no_grad(): logits_loaded = model_loaded.forward(input_tensor).logits self.assertEqual((logits_loaded - logits_ref).abs().mean().item(), 0)
transformers/tests/quantization/hqq/test_hqq.py/0
{ "file_path": "transformers/tests/quantization/hqq/test_hqq.py", "repo_id": "transformers", "token_count": 2377 }
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import inspect import json import os import random import tempfile import unittest from importlib import import_module from math import isnan from typing import List, Tuple from datasets import Dataset from transformers import is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( # noqa: F401 CaptureLogger, _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, require_tf2onnx, slow, torch_device, ) from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput logger = logging.get_logger(__name__) if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFAutoModel, TFAutoModelForSequenceClassification, TFSharedEmbeddings, ) from transformers.generation import ( TFBeamSampleDecoderOnlyOutput, TFBeamSampleEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput, TFBeamSearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput, TFGreedySearchEncoderDecoderOutput, TFSampleDecoderOnlyOutput, TFSampleEncoderDecoderOutput, ) from transformers.modeling_tf_utils import keras tf.config.experimental.enable_tensor_float_32_execution(False) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) if is_torch_available(): import torch def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key: setattr(configs_no_init, key, 0.0) return configs_no_init @require_tf class TFModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () test_mismatched_shapes = True test_resize_embeddings = True test_head_masking = True is_encoder_decoder = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in 
get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING), *get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), ]: inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING), ] and "labels" in dict(inspect.signature(model_class.call).parameters): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = tf.zeros( (self.model_tester.batch_size, num_patches**2), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32) elif model_class.__name__.endswith("ForCTC"): # When we have enough CTC models for an AutoClass, we should use their mapping instead of name checks inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def test_initialization(self): pass def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) after_outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) def test_save_load_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) model_config = model.get_config() # make sure that returned config is 
jsonifiable, which is required by keras json.dumps(model_config) new_model = model_class.from_config(model.get_config()) # make sure it also accepts a normal config _ = model_class.from_config(model.config) _ = new_model(self._prepare_for_class(inputs_dict, model_class)) # Build model new_model.set_weights(model.get_weights()) after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) @slow def test_saved_model_creation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = False config.output_attentions = False if hasattr(config, "use_cache"): config.use_cache = False model_class = self.all_model_classes[0] class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) model(class_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") self.assertTrue(os.path.exists(saved_model_dir)) def test_prepare_serving_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(inputs) serving_outputs = model.serving_output(outputs) for k, v in serving_outputs.items(): # Check that we have one of three possible outputs: None, tuple of tensors or a tensor if isinstance(v, tuple): self.assertTrue(all(isinstance(elem, tf.Tensor) for elem in v)) elif v is not None: self.assertIsInstance(v, tf.Tensor) else: self.assertIsNone(v) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend(["decoder_position_ids"] if "decoder_position_ids" in arg_names else []) expected_arg_names.extend( ["head_mask", "decoder_head_mask"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) expected_arg_names.extend( ["cross_attn_head_mask", "encoder_outputs"] if "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_onnx_compliancy(self): if not self.test_onnx: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() INTERNAL_OPS = [ "Assert", "AssignVariableOp", "EmptyTensorList", "ReadVariableOp", "ResourceGather", "TruncatedNormal", "VarHandleOp", "VarIsInitializedOp", ] onnx_ops = [] with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f: onnx_opsets = json.load(f)["opsets"] for i in range(1, self.onnx_min_opset + 1): onnx_ops.extend(onnx_opsets[str(i)]) for model_class in self.all_model_classes: model_op_names = set() with tf.Graph().as_default() as g: model = model_class(config) model.build_in_name_scope() for op in g.get_operations(): model_op_names.add(op.node_def.op) model_op_names = sorted(model_op_names) 
incompatible_ops = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(op) self.assertEqual(len(incompatible_ops), 0, incompatible_ops) # `tf2onnx` issue page: https://github.com/onnx/tensorflow-onnx/issues/2172 # TODO: undo skip once a fix is done in `tf2onnx` @unittest.skip("`tf2onnx` broke with TF 2.13") @require_tf2onnx @slow def test_onnx_runtime_optimize(self): if not self.test_onnx: return import onnxruntime import tf2onnx config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) model.build_in_name_scope() onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset) onnxruntime.InferenceSession(onnx_model_proto.SerializeToString()) def test_keras_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(99, 32, name="shared") config.use_cache = inputs_dict.pop("use_cache", None) main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() if tf.is_tensor(tensor) } model = keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) def assert_outputs_same(self, after_outputs, outputs): # Make sure we don't have nans if isinstance(after_outputs, tf.Tensor): out_1 = after_outputs.numpy() elif isinstance(after_outputs, dict): out_1 = after_outputs[list(after_outputs.keys())[0]].numpy() else: out_1 = after_outputs[0].numpy() out_2 = outputs[0].numpy() self.assertEqual(out_1.shape, out_2.shape) out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! 
def _make_attention_mask_non_null(self, inputs_dict): """Make sure no sequence has all zeros as attention mask""" for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) # Here we make the first sequence with all 0s as attention mask. # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks. # TODO: enable this block once the large negative values thing is cleaned up. # (see https://github.com/huggingface/transformers/issues/14859) # attention_mask = tf.concat( # [ # tf.zeros_like(attention_mask[:1], dtype=tf.int32), # tf.cast(attention_mask[1:], dtype=tf.int32) # ], # axis=0 # ) inputs_dict[k] = attention_mask # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): """For temporarily ignoring some failed test cases (issues to be fixed)""" tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "TFFlaubertWithLMHeadModel", "TFFunnelForPreTraining", "TFElectraForPreTraining", "TFXLMWithLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("TFGPT2"): # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple. tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") # create new outputs from the remaining fields new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently testing. For example, `TFBertModel`, TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element being a named field in the output. """ from transformers.cache_utils import DynamicCache self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) # Don't copy this block to model specific test file! # TODO: remove this method and this line after issues are fixed tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `names` attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `names` should have the same length as `tf_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names` attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): if isinstance(pt_output, DynamicCache): pt_output = pt_output.to_legacy_cache() self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) # other general float inputs elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, # Not all models accept "labels" in the forward pass (yet :) ) return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) # For some models (e.g. base models), there is no label returned. # Set the input dict to `None` to avoid check outputs twice for the same input dicts. 
if not set(tf_inputs_dict_with_labels.keys()).symmetric_difference(tf_inputs_dict.keys()): tf_inputs_dict_with_labels = None # Check we can load pt model in tf and vice-versa with model => model functions tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # check with `labels` if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # check with `labels` if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) @slow def test_compile_tf_model(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: # Prepare our model model = model_class(config) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes functional_inputs = { key: keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key) for key, val in model.input_signature.items() if key in model.dummy_inputs } outputs_dict = model(functional_inputs) hidden_states = outputs_dict[0] # Compile extended model functional_model = keras.Model(inputs=functional_inputs, outputs=hidden_states) model_out = functional_model.predict(model.dummy_inputs) # Check we can pass inputs with the Keras API self.assertTrue(model_out is not None) with tempfile.TemporaryDirectory() as tmpdirname: functional_model.save(tmpdirname) # Ensure we can save/export the whole functional model def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) outputs_keywords = model(**inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) decoder_key_length = getattr(self.model_tester, 
"key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) def check_decoder_attentions_output(outputs): out_len = len(outputs) self.assertEqual(min(out_len % 2, out_len % 5), 0) # differentiation due to newly added cross_attentions decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(outputs): attentions = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) def test_headmasking(self): if not self.test_head_masking: return random.Random().seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() random.Random().seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) # To be sure we have no Nan for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Prepare head_mask def prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return tf.concat( (tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0 ) elif i == num_hidden_layers - 1: return tf.concat( (tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0 ) else: return tf.ones(attention_heads, dtype=tf.float32) head_mask = tf.stack( [ prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ], 0, ) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in 
arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) def check_attentions_validity(attentions): # Remove Nan for t in attentions: self.assertLess( (tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy() ) # Check we don't have more than 25% nans (arbitrary) attentions = [ tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions ] # remove them (the test is less complete) self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0) if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0) self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) if "cross_attn_head_mask" in arg_names: check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) if model.config.is_encoder_decoder: encoder_hidden_states = outputs.encoder_hidden_states decoder_hidden_states = outputs.decoder_hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(encoder_hidden_states), expected_num_layers) self.assertListEqual( list(encoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(decoder_hidden_states), expected_num_layers) self.assertListEqual( list(decoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) else: hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() text_in_text_out_models = ( get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING) + get_values(TF_MODEL_FOR_MASKED_LM_MAPPING) + get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) ) speech_in_text_out_models = get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING) for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), keras.layers.Layer) legacy_text_in_text_out = model.get_lm_head() is not None if model_class in text_in_text_out_models or legacy_text_in_text_out: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, keras.layers.Layer) bias = model.get_bias() 
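                # For these heads, `get_bias()` returns either None or a dict mapping weight names to
                # tf.Variable, which is exactly what the assertions below verify.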
if bias is not None: self.assertIsInstance(bias, dict) for _, v in bias.items(): self.assertIsInstance(v, tf.Variable) elif model_class in speech_in_text_out_models: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, keras.layers.Layer) bias = model.get_bias() self.assertIsNone(bias) else: out_embeddings = model.get_output_embeddings() assert out_embeddings is None bias = model.get_bias() self.assertIsNone(bias) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) first, second = ( model(self._prepare_for_class(inputs_dict, model_class), training=False)[0], model(self._prepare_for_class(inputs_dict, model_class), training=False)[0], ) out_1 = first.numpy() out_2 = second.numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) # Not all models accept "labels" in the forward pass (yet :) ) if "labels" in inspect.signature(model.call).parameters.keys(): tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, 
model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) model(inputs) def test_numpy_arrays_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def prepare_numpy_arrays(inputs_dict): inputs_np_dict = {} for k, v in inputs_dict.items(): if tf.is_tensor(v): inputs_np_dict[k] = v.numpy() else: inputs_np_dict[k] = np.array(k) return inputs_np_dict for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) inputs_np = prepare_numpy_arrays(inputs) output_for_dict_input = model(inputs_np) output_for_kw_input = model(**inputs_np) self.assert_outputs_same(output_for_dict_input, output_for_kw_input) def test_valid_input_signature_and_dummies(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) call_args = inspect.signature(model.call).parameters for key in model.input_signature: self.assertIn(key, call_args) for key in model.dummy_inputs: self.assertIn(key, call_args) def test_resize_token_embeddings(self): # TODO (joao): after the embeddings refactor is complete, rework this test so as to rely exclusively on # keras.layers.Embedding if not self.test_resize_embeddings: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if isinstance(embedding_layer, keras.layers.Embedding): # builds the embeddings layer model.build_in_name_scope() return embedding_layer.embeddings else: return model._get_word_embedding_weight(embedding_layer) for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: # build the embeddings model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config` old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_bias = model.get_bias() old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_bias = model.get_bias() new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # check that the resized embeddings size matches the desired size. 
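                # (`size=None` exercises the no-op path: `resize_token_embeddings(None)` keeps the
                # current vocab size, so we compare against `config.vocab_size` instead.)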
assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_bias is not None and new_bias is not None: for old_weight, new_weight in zip(old_bias.values(), new_bias.values()): self.assertEqual(new_weight.shape[-1], assert_size) models_equal = True for p1, p2 in zip(tf.squeeze(old_weight), tf.squeeze(new_weight)): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1]) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) # TODO (Joao): this test is not slow, but it's tagged as such to keep track of failures on the scheduled CI runs, # while passing push CI. Fix the underlying issues and remove the tag. @slow def test_save_load_after_resize_token_embeddings(self): if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # create a model with resized (expended) embeddings new_tokens_size = 10 old_total_size = config.vocab_size new_total_size = old_total_size + new_tokens_size model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config` model.build_in_name_scope() model.resize_token_embeddings(new_total_size) # fetch the output for an input exclusively made of new members of the vocabulary inputs_dict = copy.deepcopy(original_inputs_dict) ids_feat_name = None if "input_ids" in inputs_dict: ids_feat_name = "input_ids" elif "decoder_input_ids" in inputs_dict: ids_feat_name = "decoder_input_ids" else: assert False, "No input ids feature found in the inputs dict" new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size) new_vocab_input_ids += old_total_size inputs_dict[ids_feat_name] = new_vocab_input_ids if "input_ids" in inputs_dict: inputs_dict["input_ids"] = new_vocab_input_ids if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = new_vocab_input_ids prepared_inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs) # save and load the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model = model_class.from_pretrained(tmpdirname) restored_model_outputs = model(**prepared_inputs) # check that the output for the restored model is the same self.assert_outputs_same(restored_model_outputs, outputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="This test always passes on CPU.", ) def test_embeddings_out_of_bounds_raise_exception(self): # TF embeddings layers don't raise an exception when an index is out of bounds on GPU, so we manually raise it. # This test should only fail on GPU for models where we haven't added the safety check. 
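        # On GPU, TF embedding lookups (tf.gather) silently write zeros for out-of-range indices
        # instead of raising, so the exception exercised here has to be raised manually in the
        # modeling code.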
if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) inputs_dict = copy.deepcopy(original_inputs_dict) if "input_ids" in inputs_dict: inputs_dict["input_ids"] = inputs_dict["input_ids"] * int(1e9) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = inputs_dict["decoder_input_ids"] * int(1e9) prepared_inputs = self._prepare_for_class(inputs_dict, model_class) with self.assertRaises(tf.errors.InvalidArgumentError): model(**prepared_inputs) def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids with self.assertRaises(ValueError): model.generate(do_sample=True, max_length=5) # num_return_sequences = 1 self._check_generated_ids(model.generate(input_ids, do_sample=True)) elif model_class.__name__ not in ["TFSpeech2TextForConditionalGeneration"]: # Models with non-text inputs won't work here; num_return_sequences = 1 self._check_generated_ids(model.generate(do_sample=True, max_length=5)) with self.assertRaises(ValueError): # generating multiple sequences when no beam search generation # is not allowed as it would always generate the same sequences model.generate(input_ids, do_sample=False, num_return_sequences=2) # num_return_sequences > 1, sample self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_no_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) output_greedy = model.generate( input_ids, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_sample = model.generate( input_ids, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput) self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput) self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput) def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) for model_class in self.all_generative_model_classes: model = 
model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids, num_return_sequences = 1 self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2)) else: # num_return_sequences = 1 self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2)) with self.assertRaises(ValueError): # generating more sequences than having beams leads is not possible model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2) # num_return_sequences > 1, sample self._check_generated_ids( model.generate( input_ids, do_sample=True, num_beams=2, num_return_sequences=2, ) ) # num_return_sequences > 1, greedy self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) output_beam_search = model.generate( input_ids, num_beams=2, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_beam_sample = model.generate( input_ids, num_beams=2, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput) def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # The number of elements in the loss should be the same as the number of elements in the label prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label_names = sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True) if not added_label_names: continue # This test is only for models with easily-separable labels added_label = prepared_for_class[added_label_names[0]] expected_loss_size = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) outputs = model(model_input, **prepared_for_class) if not isinstance(outputs, ModelOutput) or not 
hasattr(outputs, "loss"): continue loss = outputs.loss self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss when we mask some positions prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) if "labels" in prepared_for_class: labels = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: labels[0] = -100 prepared_for_class["labels"] = tf.convert_to_tensor(labels) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that model correctly compute the loss with a dict prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss with a tuple prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Get keys that were added with the _prepare_for_class function label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @slow def test_keras_fit(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # We also remove "return_loss" as this is covered by the train_step when using fit() prepared_for_class = { key: val for key, val in prepared_for_class.items() if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "return_loss") } if "labels" in prepared_for_class and "decoder_input_ids" in prepared_for_class: del prepared_for_class["decoder_input_ids"] accuracy_classes = [ "ForPreTraining", "ForCausalLM", "ForMaskedLM", "ForQuestionAnswering", "ForMultipleChoice", "ForSequenceClassification", "ForTokenClassification", "ForNextSentencePrediction", "LMHeadModel", ] for accuracy_class in accuracy_classes: if model.__class__.__name__.endswith(accuracy_class): metrics = 
[keras.metrics.SparseCategoricalAccuracy()] break else: metrics = [] if hasattr(self.model_tester, "batch_size"): sample_weight = tf.convert_to_tensor([0.5] * self.model_tester.batch_size, dtype=tf.float32) else: sample_weight = None # Build the model so we can get some constant weights and check outputs outputs = model(prepared_for_class) if getattr(outputs, "loss", None) is None: continue model_weights = model.get_weights() # Run eagerly to save some expensive compilation times model.compile(optimizer=keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] self.assertTrue(not isnan(val_loss1)) accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")} possible_label_cols = { "labels", "label", "label_ids", "start_positions", "start_position", "end_positions", "end_position", "next_sentence_label", } label_names = possible_label_cols.intersection(set(prepared_for_class)) if len(label_names) == 0: # The next tests only make sense for models with separate inputs and labels, and do not make # sense for models that don't clearly distinguish between the two (e.g. CLIP) return labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} self.assertGreater(len(inputs_minus_labels), 0) # We reinitialize the model here even though our learning rate was zero # because BatchNorm updates weights by means other than gradient descent. 
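            # Restore the initial weights so that the second fit() call below starts from exactly the
            # same state and its validation loss stays comparable to `val_loss1`.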
model.set_weights(model_weights) history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(not isnan(val_loss2)) accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")} self.check_keras_fit_results(val_loss1, val_loss2) self.assertEqual(history1.history.keys(), history2.history.keys()) for key in history1.history.keys(): if not key.startswith("val_"): self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!") if metrics: self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!") def test_int_support(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: prepared_for_class = self._prepare_for_class( inputs_dict.copy(), model_class, return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) if not any( tensor.dtype.is_integer for tensor in prepared_for_class.values() if isinstance(tensor, tf.Tensor) ): return # No integer inputs means no need for this test prepared_for_class = { key: tf.cast(tensor, tf.int64) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model = model_class(config) model(**prepared_for_class) # No assertion, we're just checking this doesn't throw an error int32_prepared_for_class = { key: tf.cast(tensor, tf.int32) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model(**int32_prepared_for_class) # No assertion, we're just checking this doesn't throw an error # After testing that the model accepts all int inputs, confirm that its dummies are int32 for key, tensor in model.dummy_inputs.items(): self.assertTrue( isinstance(tensor, tf.Tensor) or keras.backend.is_keras_tensor(tensor), "Dummy inputs should be tf.Tensor!", ) if tensor.dtype.is_integer: self.assertTrue(tensor.dtype == tf.int32, "Integer dummy inputs should be tf.int32!") # Also confirm that the input_signature uses int32 for key, tensor_spec in model.input_signature.items(): if tensor_spec.dtype.is_integer: self.assertTrue(tensor_spec.dtype == tf.int32, "Input signatures should use tf.int32 for ints!") def test_generate_with_headmasking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_generative_model_classes: model = model_class(config) # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue head_masking = { "head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)), "decoder_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)), "cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)), } signature = inspect.signature(model.call) if set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( inputs_dict["input_ids"], num_beams=1, max_length=inputs_dict["input_ids"] + 5, output_attentions=True, return_dict_in_generate=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step 
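                # `encoder_attentions` come from a single encoder forward pass, while decoder/cross
                # attentions are reported per generated token, hence the `[-1]` (last step) indexing.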
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) _ = model(**inputs) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_tf_utils") with CaptureLogger(logger) as cl: new_model = TFAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = TFAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) # Although Tf models always have a prefix pointing to `MainLayer`, # we still add this "without prefix" test to keep a consistency between tf and pt tests. input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "call")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_dataset_conversion(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=False) if "labels" in tf_inputs_dict: return # This is some kinda funky decoder model that needs labels in its forward pass tf_inputs_dict = { key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key and isinstance(val, tf.Tensor) } tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch = next(iter(tf_dataset)) if isinstance(test_batch, tf.Tensor): self.assertEqual(len(test_batch), len(input_dataset)) # Assert we didn't lose any data elif isinstance(test_batch, dict): # Assert we discarded the unwanted extra column but kept everything else self.assertEqual(len(test_batch), len(input_dataset.features) - 1) self.assertNotIn("extra_unwanted_column", test_batch) for tensor in test_batch.values(): self.assertTrue(isinstance(tensor, tf.Tensor)) self.assertEqual(len(tensor), len(input_dataset)) # Assert we didn't lose any data 
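            # Finally, the batch produced by `prepare_tf_dataset` should be directly consumable by the
            # model's forward pass.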
model(test_batch, training=False) if "labels" in inspect.signature(model_class.call).parameters.keys(): tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if "labels" not in tf_inputs_dict: return # This model isn't giving us labels after all, don't try training with it tf_inputs_dict = { key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key and isinstance(val, tf.Tensor) } tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch, test_batch_labels = next(iter(tf_dataset)) self.assertGreater(len(test_batch_labels), 0) # Assert the labels are present feature_columns = 1 if isinstance(test_batch, tf.Tensor) else len(test_batch) label_columns = 1 if isinstance(test_batch_labels, tf.Tensor) else len(test_batch_labels) # Assert we discarded the unwanted extra column but kept everything else self.assertEqual(feature_columns + label_columns, len(input_dataset.features) - 1) if isinstance(test_batch, dict): self.assertNotIn("extra_unwanted_column", test_batch) if isinstance(test_batch_labels, dict): self.assertNotIn("extra_unwanted_column", test_batch_labels) model.compile(optimizer="sgd", run_eagerly=True) model.train_on_batch(test_batch, test_batch_labels) def _test_xla_generate(self, **generate_kwargs): def _generate_and_check_results(model, inputs, is_input_ids): # make sure there are no pad tokens in prompt, which may trigger unwanted behavior if is_input_ids: if model.generation_config.pad_token_id is not None: if config.pad_token_id == 0: new_pad_token = model.generation_config.pad_token_id + 1 else: new_pad_token = model.generation_config.pad_token_id - 1 else: new_pad_token = None inputs = tf.where(inputs != model.generation_config.pad_token_id, inputs, new_pad_token) generated = model.generate(inputs, **generate_kwargs).numpy() generate_xla = tf.function(model.generate, jit_compile=True) generated_xla = generate_xla(inputs, **generate_kwargs).numpy() # Due to numerical instability, let's fail the test only if there are more than 10% of input sequences give # different outputs between XLA and non-XLA versions. If there are less than 10 examples, let's be strict # and not allow any difference. 
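            # `diff[0]` collects the non-XLA sequences that disagree and `diff[1]` their XLA
            # counterparts, so a failure prints both side by side via `assertListEqual` below.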
diff = [[], []] for _generated, _generated_xla in zip(generated.tolist(), generated_xla.tolist()): if _generated != _generated_xla: diff[0].append(_generated) diff[1].append(_generated_xla) ratio = len(diff[0]) / len(generated) if ratio > 0.1 or (len(diff[0]) > 0 and len(generated) < 10): self.assertListEqual(diff[0], diff[1]) for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.eos_token_id = None # Generate until max length config.do_sample = False # extract the input to the model is_input_ids = "input_ids" in inputs_dict is_input_features = "input_features" in inputs_dict if not (is_input_ids or is_input_features): raise ValueError("No valid generate input found in inputs_dict") inputs = inputs_dict["input_ids"] if is_input_ids else inputs_dict["input_features"] # fix config for models with additional sequence-length limiting settings seq_len = inputs.get_shape()[1] for var_name in ["max_position_embeddings", "max_target_positions"]: attr = getattr(config, var_name, None) if attr is not None and attr < seq_len + generate_kwargs["max_new_tokens"]: try: setattr(config, var_name, seq_len + generate_kwargs["max_new_tokens"]) except NotImplementedError: # xlnet will raise an exception when trying to set # max_position_embeddings. pass model = model_class(config) if model.supports_xla_generation: _generate_and_check_results(model, inputs, is_input_ids) else: with self.assertRaises(ValueError): _generate_and_check_results(model, inputs, is_input_ids) def test_xla_generate_fast(self): """ Basic quick test for generate-compatible classes that confirms that XLA-generated tokens are the same as their non XLA counterparts. Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=3) @slow def test_xla_generate_contrastive(self): """ Slow and challenging version of `test_xla_generate_fast` for contrastive search -- contrastive search directly manipulates the model cache and other outputs, and this test ensures that they are in a valid format that is also supported by XLA. Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=16, penalty_alpha=0.5, top_k=4) @slow def test_xla_generate_slow(self): """ Slow and challenging version of `test_xla_generate_fast` -- this test asks for several long sequences using beam search, with and without XLA. The two outputs should match, and a failure in this test indicates that the model may need further analysis if it is to be used for XLA generation. 
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=8, num_return_sequences=2, max_new_tokens=128) def _generate_random_bad_tokens(self, num_bad_tokens, model): # special tokens cannot be bad tokens special_tokens = [] if model.config.bos_token_id is not None: special_tokens.append(model.config.bos_token_id) if model.config.pad_token_id is not None: special_tokens.append(model.config.pad_token_id) if model.config.eos_token_id is not None: special_tokens.append(model.config.eos_token_id) # create random bad tokens that are not special tokens bad_tokens = [] while len(bad_tokens) < num_bad_tokens: token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0] if token not in special_tokens: bad_tokens.append(token) return bad_tokens def _check_generated_ids(self, output_ids): for token_id in output_ids[0].numpy().tolist(): self.assertGreaterEqual(token_id, 0) self.assertLess(token_id, self.model_tester.vocab_size) def _check_match_tokens(self, generated_ids, bad_words_ids): # for all bad word tokens for bad_word_ids in bad_words_ids: # for all slices in batch for generated_ids_slice in generated_ids: # for all word idx for i in range(len(bad_word_ids), len(generated_ids_slice)): # if tokens match if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids: return True return False def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32) return output def random_attention_mask(shape, rng=None, name=None, dtype=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None, dtype=dtype) # Mark the first token as 1 (matches behaviour of PyTorch/Flax function) attn_mask = tf.concat([tf.ones_like(attn_mask[:, :1]), attn_mask[:, 1:]], axis=1) return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)
transformers/tests/test_modeling_tf_common.py/0
{ "file_path": "transformers/tests/test_modeling_tf_common.py", "repo_id": "transformers", "token_count": 43810 }
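# Illustrative sketch (not part of the test file above; the helper names below are invented for
# illustration): the PT/TF equivalence checks boil down to converting TensorFlow tensors to PyTorch
# through NumPy, running both models, and comparing the outputs within a small tolerance.
import numpy as np
import tensorflow as tf
import torch


def tf_tensor_to_pt(t: tf.Tensor) -> torch.Tensor:
    # Float inputs (input_values, pixel_values, input_features, ...) become torch.float32,
    # everything else (e.g. input_ids) becomes torch.long, mirroring `prepare_pt_inputs_from_tf_inputs`.
    array = t.numpy()
    if t.dtype.is_floating:
        return torch.from_numpy(array).to(torch.float32)
    return torch.from_numpy(array).to(torch.long)


def max_abs_diff(tf_out: tf.Tensor, pt_out: torch.Tensor) -> float:
    # Compare in NumPy space, which is effectively what `check_pt_tf_outputs` does.
    return float(np.amax(np.abs(tf_out.numpy() - pt_out.detach().numpy())))


# Toy usage with dummy tensors standing in for real model inputs/outputs:
dummy_input_ids = tf.constant([[1, 2, 3]], dtype=tf.int32)
assert tf_tensor_to_pt(dummy_input_ids).dtype == torch.long

tf_logits = tf.constant([[0.1, 0.2], [0.3, 0.4]])
pt_logits = torch.tensor([[0.1, 0.2], [0.3, 0.4]])
assert max_abs_diff(tf_logits, pt_logits) < 1e-5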
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This test is meant to be run in on an instance with TPUs like this: # # python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py # # Replace 8 with the number of TPU cores you have. # import sys from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() # Add some (unused) params otherwise DDP will complain. self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids def main(): parser = HfArgumentParser((TrainingArguments,)) sys.argv += ["--output_dir", "./examples"] training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, " f"tpu_num_cores: {training_args.tpu_num_cores}", ) # Essentially, what we want to verify in the distributed case is # that we get all samples back, in the right order. # (this is crucial for prediction for instance) for dataset_length in [1001, 256, 15]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None logger.info("🔥 All distributed tests successful") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/tests/trainer/test_trainer_tpu.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_tpu.py", "repo_id": "transformers", "token_count": 1651 }
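# Minimal sketch of the property the script above verifies (no TPU or Trainer needed for the
# illustration; `check_sequential` is an invented helper): after distributed evaluation, predictions
# and label_ids must come back as the unshuffled sequence 0..len(dataset)-1, i.e. no sample is
# dropped, duplicated, or reordered by the gather step.
from typing import Dict, List


def check_sequential(predictions: List[int], label_ids: List[int], dataset_length: int) -> Dict[str, bool]:
    expected = list(range(dataset_length))
    return {"success": predictions == expected and label_ids == expected}


# A length that is not a multiple of the number of workers (e.g. 1001 samples on 8 cores) is the
# interesting case: naive padding or truncation in the distributed sampler would break this property.
print(check_sequential(list(range(1001)), list(range(1001)), 1001))  # {'success': True}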
import unittest
import warnings
from dataclasses import dataclass

from transformers.convert_slow_tokenizer import SpmConverter
from transformers.testing_utils import get_tests_dir


@dataclass
class FakeOriginalTokenizer:
    vocab_file: str


class ConvertSlowTokenizerTest(unittest.TestCase):
    def test_spm_converter_bytefallback_warning(self):
        spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model")
        spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")

        original_tokenizer_without_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_without_bytefallback)

        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_without_bytefallback)
        self.assertEqual(len(w), 0)

        original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback)

        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_with_bytefallback)
        self.assertEqual(len(w), 1)

        self.assertIn(
            "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
            " which is not implemented in the fast tokenizers.",
            str(w[0].message),
        )
transformers/tests/utils/test_convert_slow_tokenizer.py/0
{ "file_path": "transformers/tests/utils/test_convert_slow_tokenizer.py", "repo_id": "transformers", "token_count": 524 }
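# Minimal sketch of the warning-assertion pattern used above, with a hypothetical `convert` function
# standing in for `SpmConverter`: `warnings.catch_warnings(record=True)` captures emitted warnings so
# the test can assert on their count and message.
import unittest
import warnings


def convert(uses_byte_fallback: bool) -> None:
    # Hypothetical converter: warns about the one feature it cannot reproduce.
    if uses_byte_fallback:
        warnings.warn("byte fallback option is not implemented in the fast tokenizers")


class WarningPatternTest(unittest.TestCase):
    def test_warns_only_with_byte_fallback(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")  # avoid deduplication of repeated warnings
            convert(uses_byte_fallback=False)
        self.assertEqual(len(w), 0)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            convert(uses_byte_fallback=True)
        self.assertEqual(len(w), 1)
        self.assertIn("byte fallback", str(w[0].message))


if __name__ == "__main__":
    unittest.main()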
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from transformers import LlamaConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device if is_torch_available(): import torch from transformers import ROPE_INIT_FUNCTIONS from transformers.modeling_rope_utils import rope_config_validation @require_torch class RopeTest(unittest.TestCase): def test_rope_validation(self): config = LlamaConfig() all_rope_types = ROPE_INIT_FUNCTIONS.keys() # The base config is always valid (default RoPE) rope_config_validation(config) # If we explicitly set the other RoPE types, then validation should fail for rope_type in all_rope_types: if rope_type != "default": config.rope_scaling = {"rope_type": rope_type} with self.assertRaises(KeyError): rope_config_validation(config) # Parameters are exclusive to their own RoPE type, and should raise an exception if incorrectly passed valid_param_mapping = { "factor": ["linear", "dynamic", "yarn", "longrope"], "attention_factor": ["yarn", "longrope"], "beta_fast": ["yarn"], "beta_slow": ["yarn"], "short_factor": ["longrope"], "long_factor": ["longrope"], } for rope_type in all_rope_types: if rope_type == "default": continue # checked above for param, valid_rope_types in valid_param_mapping.items(): # Set `param` with a dummy value -- we want to test the dict key config.rope_scaling = {"rope_type": rope_type, param: True} if rope_type in valid_rope_types: continue else: with self.assertRaises(KeyError): rope_config_validation(config) # Any other parameters passed to RoPE will raise a warning that a particular key is not used # But sometimes we can have model-specific RoPE kwargs and bypass warning with `ignore_keys` model_specific_kwarg = "mrope_sections" # e,g in Qwen2-VL for rope_type in all_rope_types: if rope_type == "default": config.rope_scaling = {"rope_type": rope_type, model_specific_kwarg: True} rope_config_validation(config, ignore_keys={model_specific_kwarg}) with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs: rope_config_validation(config) self.assertEqual(len(logs.output), 1) self.assertIn(model_specific_kwarg, logs.output[0]) def test_default_rope_function_bc(self): config = LlamaConfig() device = torch_device rope_kwargs = { "rope_type": "default", "dim": config.hidden_size // config.num_attention_heads, "max_position_embeddings": config.max_position_embeddings, "base": config.rope_theta, } rope_fn = ROPE_INIT_FUNCTIONS["default"] config_freqs = rope_fn(config=config, device=device)[0] kwargs_freqs = rope_fn(**rope_kwargs, device=device)[0] torch.testing.assert_close(config_freqs, kwargs_freqs) def test_linear_rope_function_bc(self): config = LlamaConfig() config.rope_scaling = {"rope_type": "linear", "factor": 10.0} device = torch_device rope_kwargs = { "rope_type": "linear", "dim": config.hidden_size // config.num_attention_heads, "max_position_embeddings": config.max_position_embeddings, "base": config.rope_theta, "factor": 10.0, 
} rope_fn = ROPE_INIT_FUNCTIONS["linear"] config_freqs = rope_fn(config=config, device=device)[0] kwargs_freqs = rope_fn(**rope_kwargs, device=device)[0] torch.testing.assert_close(config_freqs, kwargs_freqs) def test_dynamic_rope_function_bc(self): config = LlamaConfig() config.rope_scaling = {"rope_type": "dynamic", "factor": 10.0} device = torch_device rope_kwargs = { "rope_type": "dynamic", "dim": config.hidden_size // config.num_attention_heads, "max_position_embeddings": config.max_position_embeddings, "base": config.rope_theta, "factor": 10.0, } rope_fn = ROPE_INIT_FUNCTIONS["dynamic"] config_freqs = rope_fn(config=config, device=device)[0] kwargs_freqs = rope_fn(**rope_kwargs, device=device)[0] torch.testing.assert_close(config_freqs, kwargs_freqs) def test_default_rope_numerically(self): # Note: some RoPE scaling methods start off by calling the default RoPE frequencies. If this test fails, then # multiple RoPE strategies will fail. # fmt: off EXPECTED_INV_FREQ = torch.tensor( [ 1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01, 4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01, 1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.6596e-02, 7.4989e-02, 6.4938e-02, 5.6234e-02, 4.8697e-02, 4.2170e-02, 3.6517e-02, 3.1623e-02, 2.7384e-02, 2.3714e-02, 2.0535e-02, 1.7783e-02, 1.5399e-02, 1.3335e-02, 1.1548e-02, 1.0000e-02, 8.6596e-03, 7.4989e-03, 6.4938e-03, 5.6234e-03, 4.8697e-03, 4.2170e-03, 3.6517e-03, 3.1623e-03, 2.7384e-03, 2.3714e-03, 2.0535e-03, 1.7783e-03, 1.5399e-03, 1.3335e-03, 1.1548e-03, 1.0000e-03, 8.6596e-04, 7.4989e-04, 6.4938e-04, 5.6234e-04, 4.8697e-04, 4.2170e-04, 3.6517e-04, 3.1623e-04, 2.7384e-04, 2.3714e-04, 2.0535e-04, 1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04 ], device=torch_device ) # fmt: on # input sanity checks: if these change, the output will also change config = LlamaConfig() self.assertEqual(config.rope_scaling, None) self.assertEqual(config.hidden_size, 4096) self.assertEqual(config.num_attention_heads, 32) self.assertEqual(config.rope_theta, 10000.0) self.assertFalse(hasattr(config, "partial_rotary_factor")) rope_fn = ROPE_INIT_FUNCTIONS["default"] inv_freq, attention_scale = rope_fn(config=config, device=torch_device) self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for default RoPE torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ) def test_linear_rope_numerically(self): # This is a linear scaling strategy, the **frequencies** are scaled linearly with respect to the default # frequencies (= the inverse frequencies are scaled **inversely**) config = LlamaConfig() default_rope_fn = ROPE_INIT_FUNCTIONS["default"] default_inv_freq, _ = default_rope_fn(config=config, device=torch_device) rope_fn = ROPE_INIT_FUNCTIONS["linear"] for factor in (2.0, 10.0, 20.0): config.rope_scaling = {"rope_type": "linear", "factor": factor} inv_freq, attention_scale = rope_fn(config=config, device=torch_device) self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for linear RoPE torch.testing.assert_close(inv_freq, default_inv_freq / factor) def test_dynamic_rope_numerically(self): # fmt: off EXPECTED_INV_FREQ = torch.tensor( [ 1.0000e+00, 8.0931e-01, 6.5498e-01, 5.3008e-01, 4.2900e-01, 3.4720e-01, 2.8099e-01, 2.2741e-01, 1.8404e-01, 1.4895e-01, 1.2055e-01, 9.7558e-02, 7.8955e-02, 6.3899e-02, 5.1714e-02, 4.1853e-02, 3.3872e-02, 2.7413e-02, 2.2185e-02, 1.7955e-02, 1.4531e-02, 1.1760e-02, 9.5176e-03, 7.7027e-03, 6.2339e-03, 5.0451e-03, 4.0831e-03, 3.3045e-03, 2.6744e-03, 2.1644e-03, 
1.7517e-03, 1.4176e-03, 1.1473e-03, 9.2852e-04, 7.5146e-04, 6.0817e-04, 4.9220e-04, 3.9834e-04, 3.2238e-04, 2.6091e-04, 2.1115e-04, 1.7089e-04, 1.3830e-04, 1.1193e-04, 9.0585e-05, 7.3312e-05, 5.9332e-05, 4.8018e-05, 3.8861e-05, 3.1451e-05, 2.5453e-05, 2.0600e-05, 1.6672e-05, 1.3492e-05, 1.0920e-05, 8.8374e-06, 7.1522e-06, 5.7883e-06, 4.6845e-06, 3.7912e-06, 3.0683e-06, 2.4832e-06, 2.0097e-06, 1.6265e-06 ], device=torch_device ) # fmt: on # input sanity checks: if these change, the output will also change config = LlamaConfig() self.assertEqual(config.rope_scaling, None) self.assertEqual(config.hidden_size, 4096) self.assertEqual(config.num_attention_heads, 32) self.assertEqual(config.rope_theta, 10000.0) self.assertFalse(hasattr(config, "partial_rotary_factor")) rope_fn = ROPE_INIT_FUNCTIONS["default"] default_inv_freq, _ = rope_fn(config=config, device=torch_device) # Check 1: this is a dynamic scaling strategy, it will not scale unless we provide `seq_len` larger than the # model's original training sequence length rope_fn = ROPE_INIT_FUNCTIONS["dynamic"] for factor in (2.0, 10.0, 20.0): config.rope_scaling = {"rope_type": "dynamic", "factor": factor} inv_freq, attention_scale = rope_fn(config=config, device=torch_device) self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for dynamic RoPE torch.testing.assert_close(inv_freq, default_inv_freq) inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=1) torch.testing.assert_close(inv_freq, default_inv_freq) # Check 2: if we provide `seq_len` larger than the model's original training sequence length, the frequencies # will scale up (i.e., the inverse frequencies will scale down). factor = 10.0 config.rope_scaling = {"rope_type": "dynamic", "factor": factor} inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=16384) with self.assertRaises(AssertionError): # It is NOT a linear factor torch.testing.assert_close(inv_freq, default_inv_freq / factor) torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ) def test_yarn_rope_numerically(self): # fmt: off EXPECTED_INV_FREQ = torch.tensor( [ 1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01, 4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01, 1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.3479e-02, 6.9590e-02, 5.7925e-02, 4.8136e-02, 3.9931e-02, 3.3061e-02, 2.7315e-02, 2.2515e-02, 1.8512e-02, 1.5177e-02, 1.2403e-02, 1.0101e-02, 8.1924e-03, 6.6143e-03, 5.3120e-03, 4.2400e-03, 3.3599e-03, 2.6396e-03, 2.0520e-03, 1.5746e-03, 1.1882e-03, 8.7713e-04, 6.2810e-04, 4.3007e-04, 2.7384e-04, 2.3714e-04, 2.0535e-04, 1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04, 1.0000e-04, 8.6596e-05, 7.4989e-05, 6.4938e-05, 5.6234e-05, 4.8697e-05, 4.2170e-05, 3.6517e-05, 3.1623e-05, 2.7384e-05, 2.3714e-05, 2.0535e-05, 1.7783e-05, 1.5399e-05, 1.3335e-05, 1.1548e-05 ], device=torch_device ) # fmt: on # input sanity checks: if these change, the output will also change config = LlamaConfig() self.assertEqual(config.rope_scaling, None) self.assertEqual(config.hidden_size, 4096) self.assertEqual(config.num_attention_heads, 32) self.assertEqual(config.rope_theta, 10000.0) self.assertFalse(hasattr(config, "partial_rotary_factor")) rope_fn = ROPE_INIT_FUNCTIONS["default"] default_inv_freq, _ = rope_fn(config=config, device=torch_device) # Check 1: according to the paper, if `attention_factor` is not specified, then it has a specific default -- # `0.1 * math.log(factor) + 1.0` rope_fn = ROPE_INIT_FUNCTIONS["yarn"] for factor in (2.0, 10.0, 
20.0): config.rope_scaling = {"rope_type": "yarn", "factor": factor} _, attention_scale = rope_fn(config=config, device=torch_device) self.assertEqual(attention_scale, 0.1 * math.log(factor) + 1.0) config.rope_scaling = {"rope_type": "yarn", "factor": factor, "attention_factor": 0.5} _, attention_scale = rope_fn(config=config, device=torch_device, seq_len=1) self.assertEqual(attention_scale, 0.5) # Check 2: based on `beta_fast` and `beta_slow`, the frequencies will be scaled between 1 and `factor`. # Increasing `beta_fast` will make RoPE more interpolative (apply scaling), and the other way around. # `beta_slow` behaves the opposite way. Remember: `beta_fast` > `beta_slow` # (note: adds a margin to the test for numerical stability) factor = 10.0 margin = 1e-8 config.rope_scaling = {"rope_type": "yarn", "factor": factor, "beta_fast": 32, "beta_slow": 1} inv_freq, _ = rope_fn(config=config, device=torch_device) is_bounded_by_factor = [ ((default_inv_freq[idx] / factor) - margin) <= yarn_inv_freq_value <= (default_inv_freq[idx] + margin) for idx, yarn_inv_freq_value in enumerate(inv_freq) ] self.assertTrue(all(is_bounded_by_factor)) # super high beta_fast = interpolation (i.e. scaling) in all but the first inverse frequency. The last ~20 # values (empirically checked for `beta_fast` = 1000) should be very small to linear scaling config.rope_scaling = {"rope_type": "yarn", "factor": factor, "beta_fast": 1000, "beta_slow": 1} inv_freq, _ = rope_fn(config=config, device=torch_device) is_interpolating = [ yarn_inv_freq_value < (default_inv_freq[idx] + margin) for idx, yarn_inv_freq_value in enumerate(inv_freq) ] self.assertFalse(is_interpolating[0]) self.assertTrue(all(is_interpolating[1:])) torch.testing.assert_close(inv_freq[-20:], default_inv_freq[-20:] / factor) # Check 3: numerical snapshot to avoid regressions config.rope_scaling = {"rope_type": "yarn", "factor": factor, "beta_fast": 32, "beta_slow": 1} inv_freq, _ = rope_fn(config=config, device=torch_device) torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ) def test_longrope_rope_numerically(self): # input sanity checks: if these change, the output will also change config = LlamaConfig() self.assertEqual(config.rope_scaling, None) self.assertEqual(config.hidden_size, 4096) self.assertEqual(config.num_attention_heads, 32) self.assertEqual(config.rope_theta, 10000.0) self.assertFalse(hasattr(config, "partial_rotary_factor")) # longrope applies scaling on EACH inv frequency, `short_factor` or `long_factor`, depending on the seq_len dim = config.hidden_size // config.num_attention_heads short_factor = [2.0] * (dim // 2) # scaling applied when seq_len <= max_position_embeddings long_factor = torch.ones(dim // 2).cumsum(0).tolist() # scaling applied when seq_len > max_position_embeddings rope_fn = ROPE_INIT_FUNCTIONS["default"] default_inv_freq, _ = rope_fn(config=config, device=torch_device) # Check 1: according to the paper, if `attention_factor` is not specified, then it has a specific default -- # `math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings))` rope_fn = ROPE_INIT_FUNCTIONS["longrope"] max_position_embeddings = config.max_position_embeddings for factor in (2.0, 10.0, 20.0): config.rope_scaling = { "rope_type": "longrope", "factor": factor, "short_factor": short_factor, "long_factor": long_factor, } _, attention_scale = rope_fn(config=config, device=torch_device) self.assertEqual(attention_scale, math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings))) config.rope_scaling = { "rope_type": "longrope", 
"factor": factor, "short_factor": short_factor, "long_factor": long_factor, "attention_factor": 0.5, } _, attention_scale = rope_fn(config=config, device=torch_device, seq_len=1) self.assertEqual(attention_scale, 0.5) config.rope_scaling = { "rope_type": "longrope", "factor": factor, "short_factor": short_factor, "long_factor": long_factor, } self.assertEqual(config.rope_scaling.get("attention_factor"), None) # Verify that "TypeError: '<' not supported between instances of 'NoneType' and 'int'" is not raised. rope_config_validation(config) # Check 2: seq_len == 0 -> short factor is applied to the default frequencies config.rope_scaling = { "rope_type": "longrope", "factor": 1.0, "short_factor": short_factor, "long_factor": long_factor, } inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=0) torch.testing.assert_close(inv_freq, default_inv_freq / torch.tensor(short_factor).to(torch_device)) # Check 3: seq_len > max_position_embeddings -> long factor is applied to the default frequencies inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=config.max_position_embeddings + 1) torch.testing.assert_close(inv_freq, default_inv_freq / torch.tensor(long_factor).to(torch_device)) def test_llama3_rope_numerically(self): # fmt: off EXPECTED_INV_FREQ = torch.tensor( [ 1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01, 4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01, 1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.6596e-02, 7.4989e-02, 6.4938e-02, 5.6234e-02, 4.8697e-02, 4.2170e-02, 3.6517e-02, 3.1623e-02, 2.7384e-02, 2.3714e-02, 2.0535e-02, 1.7783e-02, 1.5399e-02, 1.3335e-02, 1.0730e-02, 7.7785e-03, 5.6009e-03, 3.9991e-03, 2.8248e-03, 1.9675e-03, 1.3449e-03, 8.9549e-04, 5.7363e-04, 3.4539e-04, 2.7384e-04, 2.3714e-04, 2.0535e-04, 1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04, 1.0000e-04, 8.6596e-05, 7.4989e-05, 6.4938e-05, 5.6234e-05, 4.8697e-05, 4.2170e-05, 3.6517e-05, 3.1623e-05, 2.7384e-05, 2.3714e-05, 2.0535e-05, 1.7783e-05, 1.5399e-05, 1.3335e-05, 1.1548e-05 ], device=torch_device ) # fmt: on # input sanity checks: if these change, the output will also change config = LlamaConfig() self.assertEqual(config.rope_scaling, None) self.assertEqual(config.hidden_size, 4096) self.assertEqual(config.num_attention_heads, 32) self.assertEqual(config.rope_theta, 10000.0) self.assertFalse(hasattr(config, "partial_rotary_factor")) rope_fn = ROPE_INIT_FUNCTIONS["default"] default_inv_freq, _ = rope_fn(config=config, device=torch_device) # Check 1: `attention_factor` is always 1 rope_fn = ROPE_INIT_FUNCTIONS["llama3"] for factor in (2.0, 10.0, 20.0): config.rope_scaling = { "rope_type": "llama3", "factor": factor, "original_max_position_embeddings": 2048, "low_freq_factor": 1, "high_freq_factor": 4, } _, attention_scale = rope_fn(config=config, device=torch_device) self.assertEqual(attention_scale, 1.0) # Check 2: based on `low_freq_factor` and `high_freq_factor`, the frequencies will be scaled between 1 and # `factor` (similar to yarn). Low frequencies get scaled by `factor`, high frequences see no change, medium # frequencies are scaled by a value in between. Changing `low_freq_factor` and `high_freq_factor` changes what # is considered low, medium, and high frequencies. 
        factor = 10.0
        config.rope_scaling = {
            "rope_type": "llama3",
            "factor": factor,
            "original_max_position_embeddings": 2048,
            "low_freq_factor": 1,
            "high_freq_factor": 4,
        }
        inv_freq, _ = rope_fn(config=config, device=torch_device)
        is_bounded_by_factor = [
            (default_inv_freq[idx] / factor) <= llama3_inv_freq_value <= default_inv_freq[idx]
            for idx, llama3_inv_freq_value in enumerate(inv_freq)
        ]
        self.assertTrue(all(is_bounded_by_factor))

        # if we change `high_freq_factor` to a very high value, none is considered high-frequency -> ALL values will
        # be scaled
        config.rope_scaling = {
            "rope_type": "llama3",
            "factor": factor,
            "original_max_position_embeddings": 2048,
            "low_freq_factor": 1,
            "high_freq_factor": 1000,
        }
        inv_freq, _ = rope_fn(config=config, device=torch_device)
        is_scaled = [
            llama3_inv_freq_value < default_inv_freq[idx] for idx, llama3_inv_freq_value in enumerate(inv_freq)
        ]
        self.assertTrue(all(is_scaled))

        # Check 3: numerical snapshot to avoid regressions
        config.rope_scaling = {
            "rope_type": "llama3",
            "factor": factor,
            "original_max_position_embeddings": 2048,
            "low_freq_factor": 1,
            "high_freq_factor": 4,
        }
        inv_freq, _ = rope_fn(config=config, device=torch_device)
        torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
transformers/tests/utils/test_modeling_rope_utils.py/0
{ "file_path": "transformers/tests/utils/test_modeling_rope_utils.py", "repo_id": "transformers", "token_count": 11234 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is responsible for cleaning the model section of the table of content by removing duplicates and sorting the entries in alphabetical order. Usage (from the root of the repo): Check that the table of content is properly sorted (used in `make quality`): ```bash python utils/check_doc_toc.py ``` Auto-sort the table of content if it is not properly sorted (used in `make style`): ```bash python utils/check_doc_toc.py --fix_and_overwrite ``` """ import argparse from collections import defaultdict from typing import List import yaml PATH_TO_TOC = "docs/source/en/_toctree.yml" def clean_model_doc_toc(model_doc: List[dict]) -> List[dict]: """ Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates and sorting models alphabetically. Args: model_doc (`List[dict]`): The list of dictionaries extracted from the `_toctree.yml` file for this specific modality. Returns: `List[dict]`: List of dictionaries like the input, but cleaned up and sorted. """ counts = defaultdict(int) for doc in model_doc: counts[doc["local"]] += 1 duplicates = [key for key, value in counts.items() if value > 1] new_doc = [] for duplicate_key in duplicates: titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key}) if len(titles) > 1: raise ValueError( f"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]}) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1]) # Sort return sorted(new_doc, key=lambda s: s["title"].lower()) def check_model_doc(overwrite: bool = False): """ Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model API doc) and potentially auto-cleans it. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`). """ with open(PATH_TO_TOC, encoding="utf-8") as f: content = yaml.safe_load(f.read()) # Get to the API doc api_idx = 0 while content[api_idx]["title"] != "API": api_idx += 1 api_doc = content[api_idx]["sections"] # Then to the model doc model_idx = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 model_doc = api_doc[model_idx]["sections"] # Extract the modalities and clean them one by one. 
modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section] diff = False for idx, modality_doc in modalities_docs: old_modality_doc = modality_doc["sections"] new_modality_doc = clean_model_doc_toc(old_modality_doc) if old_modality_doc != new_modality_doc: diff = True if overwrite: model_doc[idx]["sections"] = new_modality_doc if diff: if overwrite: api_doc[model_idx]["sections"] = model_doc content[api_idx]["sections"] = api_doc with open(PATH_TO_TOC, "w", encoding="utf-8") as f: f.write(yaml.dump(content, allow_unicode=True)) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_model_doc(args.fix_and_overwrite)
transformers/utils/check_doc_toc.py/0
{ "file_path": "transformers/utils/check_doc_toc.py", "repo_id": "transformers", "token_count": 1732 }
"""Script for downloading all GLUE data. Original source: https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e Note: for legal reasons, we are unable to host MRPC. You can either use the version hosted by the SentEval team, which is already tokenized, or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually. For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example). You should then rename and place specific files in a folder (see below for an example). mkdir MRPC cabextract MSRParaphraseCorpus.msi -d MRPC cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt rm MRPC/_* rm MSRParaphraseCorpus.msi 1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now. 2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray! """ import argparse import os import sys import urllib.request import zipfile TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"] TASK2PATH = { "CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4", "SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8", "MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc", "QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5", "STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5", "MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce", "SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df", "QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601", "RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb", "WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf", "diagnostic": 
"https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D", } MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt" MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt" def download_and_extract(task, data_dir): print(f"Downloading and extracting {task}...") data_file = f"{task}.zip" urllib.request.urlretrieve(TASK2PATH[task], data_file) with zipfile.ZipFile(data_file) as zip_ref: zip_ref.extractall(data_dir) os.remove(data_file) print("\tCompleted!") def format_mrpc(data_dir, path_to_data): print("Processing MRPC...") mrpc_dir = os.path.join(data_dir, "MRPC") if not os.path.isdir(mrpc_dir): os.mkdir(mrpc_dir) if path_to_data: mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt") else: print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN) mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) if not os.path.isfile(mrpc_train_file): raise ValueError(f"Train data not found at {mrpc_train_file}") if not os.path.isfile(mrpc_test_file): raise ValueError(f"Test data not found at {mrpc_test_file}") urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv")) dev_ids = [] with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh: for row in ids_fh: dev_ids.append(row.strip().split("\t")) with open(mrpc_train_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf8" ) as train_fh, open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf8") as dev_fh: header = data_fh.readline() train_fh.write(header) dev_fh.write(header) for row in data_fh: label, id1, id2, s1, s2 = row.strip().split("\t") if [id1, id2] in dev_ids: dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) else: train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) with open(mrpc_test_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf8" ) as test_fh: header = data_fh.readline() test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n") for idx, row in enumerate(data_fh): label, id1, id2, s1, s2 = row.strip().split("\t") test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2)) print("\tCompleted!") def download_diagnostic(data_dir): print("Downloading and extracting diagnostic...") if not os.path.isdir(os.path.join(data_dir, "diagnostic")): os.mkdir(os.path.join(data_dir, "diagnostic")) data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv") urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file) print("\tCompleted!") return def get_tasks(task_names): task_names = task_names.split(",") if "all" in task_names: tasks = TASKS else: tasks = [] for task_name in 
task_names:
            if task_name not in TASKS:
                raise ValueError(f"Task {task_name} not found!")
            tasks.append(task_name)
    return tasks


def main(arguments):
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", help="directory to save data to", type=str, default="glue_data")
    parser.add_argument(
        "--tasks", help="tasks to download data for as a comma separated string", type=str, default="all"
    )
    parser.add_argument(
        "--path_to_mrpc",
        help="path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_test.txt",
        type=str,
        default="",
    )
    args = parser.parse_args(arguments)

    if not os.path.isdir(args.data_dir):
        os.mkdir(args.data_dir)
    tasks = get_tasks(args.tasks)

    for task in tasks:
        if task == "MRPC":
            format_mrpc(args.data_dir, args.path_to_mrpc)
        elif task == "diagnostic":
            download_diagnostic(args.data_dir)
        else:
            download_and_extract(task, args.data_dir)


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
transformers/utils/download_glue_data.py/0
{ "file_path": "transformers/utils/download_glue_data.py", "repo_id": "transformers", "token_count": 3917 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is used to get the models for which to run slow CI. A new model added in a pull request will be included, as well as models specified in a GitHub pull request's comment with a prefix `run-slow`, `run_slow` or `run slow`. For example, the commit message `run_slow: bert, gpt2` will give `bert` and `gpt2`. Usage: ```bash python utils/pr_slow_ci_models.py ``` """ import argparse import os.path import re import string from pathlib import Path from typing import List from git import Repo PATH_TO_REPO = Path(__file__).parent.parent.resolve() def get_new_python_files_between_commits(base_commit: str, commits: List[str]) -> List[str]: """ Get the list of added python files between a base commit and one or several commits. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). base_commit (`str`): The commit reference of where to compare for the diff. This is the current commit, not the branching point! commits (`List[str]`): The list of commits with which to compare the repo at `base_commit` (so the branching point). Returns: `List[str]`: The list of python files added between a base commit and one or several commits. """ code_diff = [] for commit in commits: for diff_obj in commit.diff(base_commit): # We always add new python files if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"): code_diff.append(diff_obj.b_path) return code_diff def get_new_python_files() -> List[str]: """ Return a list of python files that have been added between the current head and the main branch. Returns: `List[str]`: The list of python files added. """ repo = Repo(PATH_TO_REPO) try: # For the cases where the main branch exists locally main = repo.refs.main except AttributeError: # On GitHub Actions runners, it doesn't have local main branch main = repo.remotes.origin.refs.main print(f"main is at {main.commit}") print(f"Current head is at {repo.head.commit}") branching_commits = repo.merge_base(main, repo.head) for commit in branching_commits: print(f"Branching commit: {commit}") return get_new_python_files_between_commits(repo.head.commit, branching_commits) def get_new_model(): new_files = get_new_python_files() reg = re.compile(r"src/transformers/models/(.*)/modeling_.*\.py") new_model = "" for x in new_files: find_new_model = reg.findall(x) if len(find_new_model) > 0: new_model = find_new_model[0] # It's unlikely we have 2 new modeling files in a pull request. break return new_model def parse_message(message: str) -> str: """ Parses a GitHub pull request's comment to find the models specified in it to run slow CI. Args: message (`str`): The body of a GitHub pull request's comment. Returns: `str`: The substring in `message` after `run-slow`, run_slow` or run slow`. If no such prefix is found, the empty string is returned. 
""" if message is None: return "" message = message.strip().lower() # run-slow: model_1, model_2 if not message.startswith(("run-slow", "run_slow", "run slow")): return "" message = message[len("run slow") :] # remove leading `:` while message.strip().startswith(":"): message = message.strip()[1:] return message def get_models(message: str): models = parse_message(message) return models.replace(",", " ").split() def check_model_names(model_name: str): allowed = string.ascii_letters + string.digits + "_" return not (model_name.startswith("_") or model_name.endswith("_")) and all(c in allowed for c in model_name) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--message", type=str, default="", help="The content of a comment.") parser.add_argument("--quantization", action="store_true", help="If we collect quantization tests") args = parser.parse_args() new_model = get_new_model() specified_models = get_models(args.message) models = ([] if new_model == "" else [new_model]) + specified_models # a guard for strange model names models = [model for model in models if check_model_names(model)] # Add prefix final_list = [] for model in models: if not args.quantization: if os.path.isdir(f"tests/models/{model}"): final_list.append(f"models/{model}") elif os.path.isdir(f"tests/{model}") and model != "quantization": final_list.append(model) elif os.path.isdir(f"tests/quantization/{model}"): final_list.append(f"quantization/{model}") print(sorted(set(final_list)))
transformers/utils/pr_slow_ci_models.py/0
{ "file_path": "transformers/utils/pr_slow_ci_models.py", "repo_id": "transformers", "token_count": 2025 }
import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits}
transformers/utils/test_module/custom_pipeline.py/0
{ "file_path": "transformers/utils/test_module/custom_pipeline.py", "repo_id": "transformers", "token_count": 453 }
# BCO Trainer

[![](https://img.shields.io/badge/All_models-BCO-blue)](https://huggingface.co/models?other=bco,trl)

TRL supports Binary Classifier Optimization (BCO). The [BCO](https://huggingface.co/papers/2404.04656) authors train a binary classifier whose logit serves as a reward, so that the classifier maps {prompt, chosen completion} pairs to 1 and {prompt, rejected completion} pairs to 0. For a full example, have a look at [`examples/scripts/bco.py`].

## Expected dataset type

The [`BCOTrainer`] requires an [unpaired preference dataset](dataset_formats#unpaired-preference). The [`BCOTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset formats. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset.

## Expected model format

The BCO trainer expects a model of type `AutoModelForCausalLM`, whereas PPO expects `AutoModelForCausalLMWithValueHead` for the value function.

## Using the `BCOTrainer`

For a detailed example, have a look at the `examples/scripts/bco.py` script. At a high level, we need to initialize the `BCOTrainer` with a `model` we wish to train and a reference `ref_model` which we will use to calculate the implicit rewards of the preferred and rejected responses. The `beta` hyperparameter controls the strength of the implicit reward, and the dataset must contain the unpaired preference entries (prompt, completion, and label) described above. Note that the `model` and `ref_model` need to have the same architecture (i.e. decoder-only or encoder-decoder).

```py
training_args = BCOConfig(
    beta=0.1,
)

bco_trainer = BCOTrainer(
    model,
    model_ref,
    args=training_args,
    train_dataset=train_dataset,
    processing_class=tokenizer,
)
```

After this one can then call:

```py
bco_trainer.train()
```

## Underlying Distribution Matching (UDM)

In practical scenarios, the thumbs-up and thumbs-down datasets are likely to have divergent underlying distributions of prompts. Consider an LLM deployed for user feedback: if the model excels in writing tasks but underperforms in coding, the thumbs-up dataset will be dominated by writing-related prompts, while the thumbs-down dataset will contain mostly coding-related prompts. If the prompts in your desired and undesired datasets differ a lot, it is useful to enable UDM.

Choose an embedding model and tokenizer:

```py
from functools import partial

from accelerate import Accelerator
from transformers import AutoModel, AutoTokenizer

embedding_model = AutoModel.from_pretrained(your_model_id)
embedding_tokenizer = AutoTokenizer.from_pretrained(your_model_id)

# customize this function depending on your embedding model
def embed_prompt(input_ids, attention_mask, model):
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    return outputs.last_hidden_state.mean(dim=1)

embedding_model = Accelerator().prepare_model(embedding_model)
embedding_func = partial(embed_prompt, model=embedding_model)
```

Set `prompt_sample_size` to define how many prompts are selected to train the UDM classifier and start the training with the provided embedding function:

```py
training_args = BCOConfig(
    beta=0.1,
    prompt_sample_size=512,
)

bco_trainer = BCOTrainer(
    model,
    model_ref,
    args=training_args,
    train_dataset=train_dataset,
    processing_class=tokenizer,
    embedding_func=embedding_func,
    embedding_tokenizer=embedding_tokenizer,
)

bco_trainer.train()
```

### For Mixture of Experts Models: Enabling the auxiliary loss

MoEs are the most efficient if the load is about equally distributed between experts.
To ensure that we train MoEs similarly during preference-tuning, it is beneficial to add the auxiliary loss from the load balancer to the final loss. This option is enabled by setting `output_router_logits=True` in the model config (e.g. `MixtralConfig`). To scale how much the auxiliary loss contributes to the total loss, use the hyperparameter `router_aux_loss_coef=...` (default: `0.001`). A minimal sketch of this setup is shown at the end of this page.

## BCOTrainer

[[autodoc]] BCOTrainer

## BCOConfig

[[autodoc]] BCOConfig
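For reference, here is a minimal sketch of the auxiliary-loss setup described in the Mixture of Experts section above. The checkpoint name is only a placeholder, and the keyword arguments are standard `transformers` config options (not anything specific to the BCO trainer):

```py
from transformers import AutoModelForCausalLM

# Placeholder MoE checkpoint -- substitute the model you actually train with BCO.
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mixtral-8x7B-v0.1",
    output_router_logits=True,   # include the router (load-balancing) loss in the model outputs
    router_aux_loss_coef=0.001,  # weight of the auxiliary loss in the total loss
)
```

The resulting `model` can then be passed to `BCOTrainer` exactly as in the examples above.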
trl/docs/source/bco_trainer.md/0
{ "file_path": "trl/docs/source/bco_trainer.md", "repo_id": "trl", "token_count": 1222 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from peft import LoraConfig from transformers import AutoTokenizer, HfArgumentParser, load_tool from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment os.environ["HF_ALLOW_CODE_EVAL"] = "1" os.environ["TOKENIZERS_PARALLELISM"] = "false" @dataclass class ScriptArguments: model_name: Optional[str] = field(default="bigcode/starcoderbase", metadata={"help": "the model name"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) learning_rate: Optional[float] = field(default=1e-5, metadata={"help": "the learning rate"}) mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) gradient_accumulation_steps: Optional[int] = field( default=16, metadata={"help": "the number of gradient accumulation steps"} ) max_new_tokens: Optional[int] = field(default=256, metadata={"help": "max number of generated tokens per turn"}) ppo_epochs: Optional[int] = field(default=1, metadata={"help": "max number of ppo epochs"}) iterations: Optional[int] = field(default=1000, metadata={"help": "the number of iterations"}) seed: Optional[int] = field(default=0, metadata={"help": "the random seed"}) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", target_modules=["c_proj", "c_attn", "q_attn"], ) # set up models model = AutoModelForCausalLMWithValueHead.from_pretrained( script_args.model_name, use_auth_token=True, trust_remote_code=True, load_in_4bit=True, peft_config=lora_config, ) tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, use_auth_token=True) tokenizer.pad_token = tokenizer.eos_token # system prompt prompt = """\ Answer the following question: Q: In which branch of the arts is Patricia Neary famous? A: Ballets A2: <request><Wiki>Patricia Neary<call>Patricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe.<response> Result=Ballets<submit> Q: Who won Super Bowl XX? A: Chicago Bears A2: <request><Wiki>Super Bowl XX<call>Super Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. 
The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans.<response> Result=Chicago Bears<submit> Q: """ generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "eos_token_id": -1, "max_new_tokens": script_args.max_new_tokens, } # trainer config = PPOConfig( batch_size=script_args.batch_size, model_name=script_args.model_name, learning_rate=script_args.learning_rate, log_with=script_args.log_with, mini_batch_size=script_args.mini_batch_size, ppo_epochs=script_args.ppo_epochs, gradient_accumulation_steps=script_args.gradient_accumulation_steps, seed=script_args.seed, optimize_cuda_cache=True, ) ppo_trainer = PPOTrainer(args=config, model=model, tokenizer=tokenizer) dataset = load_dataset("mandarjoshi/trivia_qa", "rc", split="train") local_seed = script_args.seed + ppo_trainer.accelerator.process_index * 100003 # Prime dataset = dataset.shuffle(local_seed) def data_generator(): for i in range(len(dataset)): yield dataset[i]["question"], list(dataset[i]["answer"]["normalized_aliases"]) gen = data_generator() gen = iter(gen) def generate_data(n): tasks, answers = [], [] for _i in range(n): q, a = next(gen) tasks.append(q) answers.append(a) return tasks, answers def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] for response, answer in zip(responses, answers): reward = 0.0 for a in answer: if a.lower() in response.lower(): reward += 1.0 break rewards.append(torch.tensor(reward)) return rewards def tool_fn(x): # limit the amount of tokens return tool(x).split("\n")[1][:600] # text env tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc") text_env = TextEnvironment( model, tokenizer, {"Wiki": tool_fn}, exact_match_reward, prompt, generation_kwargs=generation_kwargs, max_tool_reponse=400, ) def print_trainable_parameters(model): trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) print_trainable_parameters(model) # main training loop for i in range(script_args.iterations): tasks, answers = generate_data(config.batch_size) queries, responses, masks, rewards, histories = text_env.run(tasks, answers=answers) train_stats = ppo_trainer.step(queries, responses, rewards, masks) response_texts = [tokenizer.decode(response) for response in responses] query_texts = [tokenizer.decode(query) for query in queries] texts = { "query": [qt.split("<submit>")[-1].strip() for qt in query_texts], "response": response_texts, "answer": [", ".join(item) for item in answers], } all_rewards = ppo_trainer.accelerator.gather(torch.tensor(rewards, device=ppo_trainer.accelerator.device)) ppo_trainer.log_stats(train_stats, texts, list(all_rewards), columns_to_log=["query", "response", "answer"]) if i % 100 == 0: ppo_trainer.save_pretrained(f"models/{script_args.model_name}_{script_args.seed}_{i}_triviaqa")
trl/examples/research_projects/tools/triviaqa.py/0
{ "file_path": "trl/examples/research_projects/tools/triviaqa.py", "repo_id": "trl", "token_count": 2589 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Run the ORPO training script with the following command with some example arguments. In general, the optimal configuration for ORPO will be similar to that of DPO without the need for a reference model: # regular: python examples/scripts/orpo.py \ --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 8e-6 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="gpt2-aligned-orpo" \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns # peft: python examples/scripts/orpo.py \ --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 8e-5 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="gpt2-lora-aligned-orpo" \ --optim rmsprop \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns \ --use_peft \ --lora_r=16 \ --lora_alpha=16 """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser from trl import ModelConfig, ORPOConfig, ORPOTrainer, ScriptArguments, get_peft_config from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, ORPOConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_into_dataclasses() ################ # Model & Tokenizer ################ model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token ################ # Dataset ################ dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) if tokenizer.chat_template is None: tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE ################ # Training ################ trainer = ORPOTrainer( model, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, processing_class=tokenizer, peft_config=get_peft_config(model_args), ) # train and save the model trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name)
trl/examples/scripts/orpo.py/0
{ "file_path": "trl/examples/scripts/orpo.py", "repo_id": "trl", "token_count": 1363 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from datasets import Dataset from transformers import HfArgumentParser @dataclass class ScriptArguments: r""" Arguments for the script. Args: test_size (`float`, *optional*, defaults to `0.1`): Fraction of the dataset to include in the test split. push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the dataset to the Hugging Face Hub. repo_id (`str`, *optional*, defaults to `"trl-internal-testing/zen"`): Hugging Face repository ID to push the dataset to. """ test_size: float = field( default=0.1, metadata={"help": "Fraction of the dataset to include in the test split."}, ) push_to_hub: bool = field( default=False, metadata={"help": "Whether to push the dataset to the Hugging Face Hub."}, ) repo_id: str = field( default="trl-internal-testing/zen", metadata={"help": "Hugging Face repository ID to push the dataset to."}, ) def main(test_size, push_to_hub, repo_id): # fmt: off standard_language_modeling_dataset = Dataset.from_dict({ "text": [ "Beautiful is better than ugly.", "Explicit is better than implicit.", "Simple is better than complex.", "Complex is better than complicated.", "Flat is better than nested.", "Sparse is better than dense.", "Readability counts.", "Special cases aren't special enough to break the rules.", "Although practicality beats purity.", "Errors should never pass silently.", "Unless explicitly silenced.", "In the face of ambiguity, refuse the temptation to guess.", "There should be one-- and preferably only one --obvious way to do it.", "Although that way may not be obvious at first unless you're Dutch.", "Now is better than never.", "Although never is often better than *right* now.", "If the implementation is hard to explain, it's a bad idea.", "If the implementation is easy to explain, it may be a good idea.", "Namespaces are one honking great idea -- let's do more of those!", ], }) standard_language_modeling_dataset = standard_language_modeling_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: standard_language_modeling_dataset.push_to_hub(repo_id, config_name="standard_language_modeling") standard_prompt_only_dataset = Dataset.from_dict({ "prompt": [ "Beautiful is better than", "Explicit is", "Simple is better", "Complex", "Flat is better than", "Sparse is better", "Readability", "Special cases aren't special", "Although practicality beats", "Errors should never", "Unless explicitly", "In the face of ambiguity, refuse", "There should be one-- and preferably", "Although that way may not be obvious at first unless you're", "Now is", "Although never is often", "If the implementation is hard to explain,", "If the implementation is easy", "Namespaces are one honking great", ], }) standard_prompt_only_dataset = standard_prompt_only_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: standard_prompt_only_dataset.push_to_hub(repo_id, config_name="standard_prompt_only") 
standard_prompt_completion_dataset = Dataset.from_dict({ "prompt": [ "Beautiful is better than", "Explicit is", "Simple is better", "Complex", "Flat is better than", "Sparse is better", "Readability", "Special cases aren't special", "Although practicality beats", "Errors should never", "Unless explicitly", "In the face of ambiguity, refuse", "There should be one-- and preferably", "Although that way may not be obvious at first unless you're", "Now is", "Although never is often", "If the implementation is hard to explain,", "If the implementation is easy", "Namespaces are one honking great", ], "completion": [ " ugly.", " better than implicit.", " than complex.", " is better than complicated.", " nested.", " than dense.", " counts.", " enough to break the rules.", " purity.", " pass silently.", " silenced.", " the temptation to guess.", " only one --obvious way to do it.", " Dutch.", " better than never.", " better than *right* now.", " it's a bad idea.", " to explain, it may be a good idea.", " idea -- let's do more of those!", ], }) standard_prompt_completion_dataset = standard_prompt_completion_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: standard_prompt_completion_dataset.push_to_hub(repo_id, config_name="standard_prompt_completion") standard_preference_dataset = Dataset.from_dict({ "prompt": [ "Beautiful is better than", "Explicit is", "Simple is better", "Complex", "Flat is better than", "Sparse is better", "Readability", "Special cases aren't special", "Although practicality beats", "Errors should never", "Unless explicitly", "In the face of ambiguity, refuse", "There should be one-- and preferably", "Although that way may not be obvious at first unless you're", "Now is", "Although never is often", "If the implementation is hard to explain,", "If the implementation is easy", "Namespaces are one honking great", ], "chosen": [ " ugly.", " better than implicit.", " than complex.", " is better than complicated.", " nested.", " than dense.", " counts.", " enough to break the rules.", " purity.", " pass silently.", " silenced.", " the temptation to guess.", " only one --obvious way to do it.", " Dutch.", " better than never.", " better than *right* now.", " it's a bad idea.", " to explain, it may be a good idea.", " idea -- let's do more of those!", ], "rejected": [ " the moon.", " worse than nothing.", " than a long vacation.", " is always the answer.", " chocolate.", " without any context.", " is optional.", " enough to become unicorns.", " reality.", " pass their driving test.", " forgotten.", " the opportunity to laugh.", " two or more confusing methods.", " a time traveler.", " never better.", " not even a possibility.", " it's clearly the best choice.", " it's probably magic.", " watermelon -- let's plant some!", ], }) standard_preference_dataset = standard_preference_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: standard_preference_dataset.push_to_hub(repo_id, config_name="standard_preference") standard_implicit_prompt_preference_dataset = Dataset.from_dict({ "chosen": [ "Beautiful is better than ugly.", "Explicit is better than implicit.", "Simple is better than complex.", "Complex is better than complicated.", "Flat is better than nested.", "Sparse is better than dense.", "Readability counts.", "Special cases aren't special enough to break the rules.", "Although practicality beats purity.", "Errors should never pass silently.", "Unless explicitly silenced.", "In the face of ambiguity, refuse the temptation to guess.", 
"There should be one-- and preferably only one --obvious way to do it.", "Although that way may not be obvious at first unless you're Dutch.", "Now is better than never.", "Although never is often better than *right* now.", "If the implementation is hard to explain, it's a bad idea.", "If the implementation is easy to explain, it may be a good idea.", "Namespaces are one honking great idea -- let's do more of those!", ], "rejected": [ "Beautiful is better than the moon.", "Explicit is worse than nothing.", "Simple is better than a long vacation.", "Complex is always the answer.", "Flat is better than chocolate.", "Sparse is better without any context.", "Readability is optional.", "Special cases aren't special enough to become unicorns.", "Although practicality beats reality.", "Errors should never pass their driving test.", "Unless explicitly forgotten.", "In the face of ambiguity, refuse the opportunity to laugh.", "There should be one-- and preferably two or more confusing methods.", "Although that way may not be obvious at first unless you're a time traveler.", "Now is never better.", "Although never is often not even a possibility.", "If the implementation is hard to explain, it's clearly the best choice.", "If the implementation is easy it's probably magic.", "Namespaces are one honking great watermelon -- let's plant some!", ], }) standard_implicit_prompt_preference_dataset = standard_implicit_prompt_preference_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: standard_implicit_prompt_preference_dataset.push_to_hub(repo_id, config_name="standard_implicit_prompt_preference") standard_unpaired_preference_dataset = Dataset.from_dict({ "prompt": [ "Beautiful is better than", "Explicit is", "Simple is better", "Complex", "Flat is better than", "Sparse is better", "Readability", "Special cases aren't special", "Although practicality beats", "Errors should never", "Unless explicitly", "In the face of ambiguity, refuse", "There should be one-- and preferably", "Although that way may not be obvious at first unless you're", "Now is", "Although never is often", "If the implementation is hard to explain,", "If the implementation is easy", "Namespaces are one honking great", ], "completion": [ " ugly.", " worse than nothing.", " than a long vacation.", " is better than complicated.", " nested.", " without any context.", " counts.", " enough to become unicorns.", " purity.", " pass silently.", " forgotten.", " the temptation to guess.", " only one --obvious way to do it.", " a time traveler.", " better than never.", " not even a possibility.", " it's a bad idea.", " it's probably magic.", " watermelon -- let's plant some!", ], "label": [True, False, False, True, True, False, True, False, True, True, False, True, True, False, True, False, True, False, False], }) standard_unpaired_preference_dataset = standard_unpaired_preference_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: standard_unpaired_preference_dataset.push_to_hub(repo_id, config_name="standard_unpaired_preference") standard_stepwise_supervision_dataset = Dataset.from_dict({ "prompt": [ "Beautiful is better than", "Explicit is better than", "Simple is better than", "Complex is better than", "Flat is better than", "Sparse is better than", "Readability counts", "Special cases aren't special enough", "Although practicality beats", "Errors should never pass", "In the face of ambiguity, refuse", "There should be one-- and preferably only one --", "Although that way may not be", "Now is 
better than", "Never is often better than", "If the implementation is hard to explain, it's", "If the implementation is easy to explain, it", "Namespaces are one", "Although practicality sometimes beats purity,", ], "completions":[ [", let me think...", " ugly."], [", of course,", " implicit.", " because clarity matters."], ["... let's keep it basic,", " complex."], [" when needed,", " complicated."], [" in terms of structure,", " nested."], ["... especially for readability."], [" especially when others read it."], [", unless...", " they follow the rules."], [" some theoretical elegance,", " purity."], [" silently,", " unless explicitly silenced."], [" the temptation to guess."], [" way to do it,"," but sometimes it's not obvious.", " especially when there's more than one possibility."], [" clear at first,", " it will eventually emerge."], [" later."], [" problematic fixes."], [" likely because it's too complicated."], [" might be a good design."], [" of those great ideas,", " that solve many problems."], [" the code should still aim for balance."], ], "labels": [ [False, True], [False, True, False], [False, True], [True, True], [True, False], [True], [False], [True, False], [False, False], [False, False], [True], [True, True, False], [True, True], [False], [True], [False], [False], [True, True], [False] ] }) standard_stepwise_supervision_dataset = standard_stepwise_supervision_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: standard_stepwise_supervision_dataset.push_to_hub(repo_id, config_name="standard_stepwise_supervision") conversational_language_modeling_dataset = Dataset.from_dict({ "messages": [ [{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Beautiful."},], [{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explicit."}], [{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Simple."}], [{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Complex."}], [{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Flat."}], [{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Sparse."}], [{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Readability."}], [{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], [{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Practicality."}], [{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Errors."}], [{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "When explicitly silenced."}], [{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Refuse the temptation to guess."}], [{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "One, and preferably only one."}], [{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "Dutch."}], [{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Now is better than never."}], [{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", 
"content": "Yes, often."}], [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}], [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it may be a good idea."}], [{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Namespaces are one honking great idea."}], ], }) conversational_language_modeling_dataset = conversational_language_modeling_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: conversational_language_modeling_dataset.push_to_hub(repo_id, config_name="conversational_language_modeling") conversational_prompt_only_dataset = Dataset.from_dict({ "prompt": [ [{"role": "user", "content": "What is better than ugly?"}], [{"role": "user", "content": "What is better than implicit?"}], [{"role": "user", "content": "What is better than complex?"}], [{"role": "user", "content": "What is better than complicated?"}], [{"role": "user", "content": "What is better than nested?"}], [{"role": "user", "content": "What is better than dense?"}], [{"role": "user", "content": "What counts?"}], [{"role": "user", "content": "Are special cases enough to break the rules?"}], [{"role": "user", "content": "What beats purity?"}], [{"role": "user", "content": "What should never pass silently?"}], [{"role": "user", "content": "When can errors pass silently?"}], [{"role": "user", "content": "What should you do in the face of ambiguity?"}], [{"role": "user", "content": "How many ways should there be to do it?"}], [{"role": "user", "content": "For whom may the way not be obvious at first?"}], [{"role": "user", "content": "What is better than never?"}], [{"role": "user", "content": "Is never better than *right* now?"}], [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}], [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], [{"role": "user", "content": "Any great ideas?"}], ], }) conversational_prompt_only_dataset = conversational_prompt_only_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: conversational_prompt_only_dataset.push_to_hub(repo_id, config_name="conversational_prompt_only") conversational_prompt_completion_dataset = Dataset.from_dict({ "prompt": [ [{"role": "user", "content": "What is better than ugly?"}], [{"role": "user", "content": "What is better than implicit?"}], [{"role": "user", "content": "What is better than complex?"}], [{"role": "user", "content": "What is better than complicated?"}], [{"role": "user", "content": "What is better than nested?"}], [{"role": "user", "content": "What is better than dense?"}], [{"role": "user", "content": "What counts?"}], [{"role": "user", "content": "Are special cases enough to break the rules?"}], [{"role": "user", "content": "What beats purity?"}], [{"role": "user", "content": "What should never pass silently?"}], [{"role": "user", "content": "When can errors pass silently?"}], [{"role": "user", "content": "What should you do in the face of ambiguity?"}], [{"role": "user", "content": "How many ways should there be to do it?"}], [{"role": "user", "content": "For whom may the way not be obvious at first?"}], [{"role": "user", "content": "What is better than never?"}], [{"role": "user", "content": "Is never better than *right* now?"}], [{"role": "user", "content": "What does it mean if the implementation is hard to 
explain?"}], [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], [{"role": "user", "content": "Any great ideas?"}], ], "completion": [ [{"role": "assistant", "content": "Beautiful."}], [{"role": "assistant", "content": "Explicit."}], [{"role": "assistant", "content": "Simple."}], [{"role": "assistant", "content": "Complex."}], [{"role": "assistant", "content": "Flat."}], [{"role": "assistant", "content": "Sparse."}], [{"role": "assistant", "content": "Readability."}], [{"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], [{"role": "assistant", "content": "Practicality."}], [{"role": "assistant", "content": "Errors."}], [{"role": "assistant", "content": "When explicitly silenced."}], [{"role": "assistant", "content": "Refuse the temptation to guess."}], [{"role": "assistant", "content": "One, and preferably only one."}], [{"role": "assistant", "content": "Dutch."}], [{"role": "assistant", "content": "Now is better than never."}], [{"role": "assistant", "content": "Yes, often."}], [{"role": "assistant", "content": "It means it's a bad idea."}], [{"role": "assistant", "content": "It means it may be a good idea."}], [{"role": "assistant", "content": "Namespaces are one honking great idea."}], ], }) conversational_prompt_completion_dataset = conversational_prompt_completion_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: conversational_prompt_completion_dataset.push_to_hub(repo_id, config_name="conversational_prompt_completion") conversational_preference_dataset = Dataset.from_dict({ "prompt": [ [{"role": "user", "content": "What is better than ugly?"}], [{"role": "user", "content": "What is better than implicit?"}], [{"role": "user", "content": "What is better than complex?"}], [{"role": "user", "content": "What is better than complicated?"}], [{"role": "user", "content": "What is better than nested?"}], [{"role": "user", "content": "What is better than dense?"}], [{"role": "user", "content": "What counts?"}], [{"role": "user", "content": "Are special cases enough to break the rules?"}], [{"role": "user", "content": "What beats purity?"}], [{"role": "user", "content": "What should never pass silently?"}], [{"role": "user", "content": "When can errors pass silently?"}], [{"role": "user", "content": "What should you do in the face of ambiguity?"}], [{"role": "user", "content": "How many ways should there be to do it?"}], [{"role": "user", "content": "For whom may the way not be obvious at first?"}], [{"role": "user", "content": "What is better than never?"}], [{"role": "user", "content": "Is never better than *right* now?"}], [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}], [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], [{"role": "user", "content": "Any great ideas?"}], ], "chosen": [ [{"role": "assistant", "content": "Beautiful."}], [{"role": "assistant", "content": "Explicit."}], [{"role": "assistant", "content": "Simple."}], [{"role": "assistant", "content": "Complex."}], [{"role": "assistant", "content": "Flat."}], [{"role": "assistant", "content": "Sparse."}], [{"role": "assistant", "content": "Readability."}], [{"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], [{"role": "assistant", "content": "Practicality."}], [{"role": "assistant", "content": "Errors."}], [{"role": "assistant", "content": "When explicitly silenced."}], [{"role": 
"assistant", "content": "Refuse the temptation to guess."}], [{"role": "assistant", "content": "One, and preferably only one."}], [{"role": "assistant", "content": "Dutch."}], [{"role": "assistant", "content": "Now is better than never."}], [{"role": "assistant", "content": "Yes, often."}], [{"role": "assistant", "content": "It means it's a bad idea."}], [{"role": "assistant", "content": "It means it may be a good idea."}], [{"role": "assistant", "content": "Namespaces are one honking great idea."}], ], "rejected": [ [{"role": "assistant", "content": "Acceptable."}], [{"role": "assistant", "content": "Explained."}], [{"role": "assistant", "content": "Very complex."}], [{"role": "assistant", "content": "Very complicated."}], [{"role": "assistant", "content": "Circular."}], [{"role": "assistant", "content": "Heavy."}], [{"role": "assistant", "content": "Looking complicated."}], [{"role": "assistant", "content": "Yes, special cases are special enough to break the rules."}], [{"role": "assistant", "content": "Nothing."}], [{"role": "assistant", "content": "Warnings."}], [{"role": "assistant", "content": "Never."}], [{"role": "assistant", "content": "Give up."}], [{"role": "assistant", "content": "As many as possible."}], [{"role": "assistant", "content": "French."}], [{"role": "assistant", "content": "Some day."}], [{"role": "assistant", "content": "No, never."}], [{"role": "assistant", "content": "It means it's a good idea."}], [{"role": "assistant", "content": "It means it's a bad idea."}], [{"role": "assistant", "content": "Recursion."}], ], }) conversational_preference_dataset = conversational_preference_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: conversational_preference_dataset.push_to_hub(repo_id, config_name="conversational_preference") conversational_implicit_prompt_preference_dataset = Dataset.from_dict({ "chosen": [ [{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Beautiful."}], [{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explicit."}], [{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Simple."}], [{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Complex."}], [{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Flat."}], [{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Sparse."}], [{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Readability."}], [{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}], [{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Practicality."}], [{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Errors."}], [{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "When explicitly silenced."}], [{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Refuse the temptation to guess."}], [{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "One, and preferably only one."}], [{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", 
"content": "Dutch."}], [{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Now is better than never."}], [{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "Yes, often."}], [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}], [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it may be a good idea."}], [{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Namespaces are one honking great idea."}], ], "rejected": [ [{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Acceptable."}], [{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explained."}], [{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Very complex."}], [{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Very complicated."}], [{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Circular."}], [{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Heavy."}], [{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Looking complicated."}], [{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "Yes, special cases are special enough to break the rules."}], [{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Nothing."}], [{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Warnings."}], [{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "Never."}], [{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Give up."}], [{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "As many as possible."}], [{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "French."}], [{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Some day."}], [{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "No, never."}], [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a good idea."}], [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}], [{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Recursion."}], ], }) conversational_implicit_prompt_preference_dataset = conversational_implicit_prompt_preference_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: conversational_implicit_prompt_preference_dataset.push_to_hub(repo_id, config_name="conversational_implicit_prompt_preference") conversational_unpaired_preference_dataset = Dataset.from_dict({ "prompt": [ [{"role": "user", "content": "What is better than ugly?"}], [{"role": "user", "content": "What is better than implicit?"}], [{"role": "user", "content": 
"What is better than complex?"}], [{"role": "user", "content": "What is better than complicated?"}], [{"role": "user", "content": "What is better than nested?"}], [{"role": "user", "content": "What is better than dense?"}], [{"role": "user", "content": "What counts?"}], [{"role": "user", "content": "Are special cases enough to break the rules?"}], [{"role": "user", "content": "What beats purity?"}], [{"role": "user", "content": "What should never pass silently?"}], [{"role": "user", "content": "When can errors pass silently?"}], [{"role": "user", "content": "What should you do in the face of ambiguity?"}], [{"role": "user", "content": "How many ways should there be to do it?"}], [{"role": "user", "content": "For whom may the way not be obvious at first?"}], [{"role": "user", "content": "What is better than never?"}], [{"role": "user", "content": "Is never better than *right* now?"}], [{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}], [{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}], [{"role": "user", "content": "Any great ideas?"}], ], "completion": [ [{'role': 'assistant', 'content': 'Beautiful.'}], [{'role': 'assistant', 'content': 'Explicit.'}], [{'role': 'assistant', 'content': 'Simple.'}], [{'role': 'assistant', 'content': 'Very complicated.'}], [{'role': 'assistant', 'content': 'Flat.'}], [{'role': 'assistant', 'content': 'Sparse.'}], [{'role': 'assistant', 'content': 'Readability.'}], [{'role': 'assistant', 'content': 'Yes, special cases are special enough to break the rules.'}], [{'role': 'assistant', 'content': 'Practicality.'}], [{'role': 'assistant', 'content': 'Warnings.'}], [{'role': 'assistant', 'content': 'When explicitly silenced.'}], [{'role': 'assistant', 'content': 'Give up.'}], [{'role': 'assistant', 'content': 'One, and preferably only one.'}], [{'role': 'assistant', 'content': 'French.'}], [{'role': 'assistant', 'content': 'Some day.'}], [{'role': 'assistant', 'content': 'Yes, often.'}], [{'role': 'assistant', 'content': "It means it's a bad idea."}], [{'role': 'assistant', 'content': 'It means it may be a good idea.'}], [{'role': 'assistant', 'content': 'Namespaces are one honking great idea.'}], ], "label": [True, True, True, False, True, True, True, False, True, False, True, False, True, False, False, True, True, True, True], }) conversational_unpaired_preference_dataset = conversational_unpaired_preference_dataset.train_test_split(test_size=test_size, shuffle=False) if push_to_hub: conversational_unpaired_preference_dataset.push_to_hub(repo_id, config_name="conversational_unpaired_preference") # fmt: on if __name__ == "__main__": parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] main(script_args.test_size, script_args.push_to_hub, script_args.repo_id)
trl/scripts/generate_zen_dataset.py/0
{ "file_path": "trl/scripts/generate_zen_dataset.py", "repo_id": "trl", "token_count": 15758 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from trl.trainer.dpo_trainer import DataCollatorForPreference class TestDataCollatorForPreference(unittest.TestCase): def setUp(self): self.collator = DataCollatorForPreference(pad_token_id=0) def assertTensorEqual(self, tensor1, tensor2): self.assertTrue(torch.equal(tensor1, tensor2), f"Tensors are not equal:\n{tensor1}\n{tensor2}") def test_padding_behavior(self): examples = [ {"prompt_input_ids": [1, 2, 3], "chosen_input_ids": [4, 5], "rejected_input_ids": [6]}, {"prompt_input_ids": [7, 8], "chosen_input_ids": [9, 10], "rejected_input_ids": [11, 12, 13]}, ] output = self.collator.torch_call(examples) expected_prompt_input_ids = torch.tensor([[1, 2, 3], [0, 7, 8]]) expected_prompt_attention_mask = torch.tensor([[1, 1, 1], [0, 1, 1]]) expected_chosen_input_ids = torch.tensor([[4, 5], [9, 10]]) expected_chosen_attention_mask = torch.tensor([[1, 1], [1, 1]]) expected_rejected_input_ids = torch.tensor([[6, 0, 0], [11, 12, 13]]) expected_rejected_attention_mask = torch.tensor([[1, 0, 0], [1, 1, 1]]) self.assertTensorEqual(output["prompt_input_ids"], expected_prompt_input_ids) self.assertTensorEqual(output["prompt_attention_mask"], expected_prompt_attention_mask) self.assertTensorEqual(output["chosen_input_ids"], expected_chosen_input_ids) self.assertTensorEqual(output["chosen_attention_mask"], expected_chosen_attention_mask) self.assertTensorEqual(output["rejected_input_ids"], expected_rejected_input_ids) self.assertTensorEqual(output["rejected_attention_mask"], expected_rejected_attention_mask) def test_optional_fields(self): examples = [ { "prompt_input_ids": [1], "chosen_input_ids": [2], "rejected_input_ids": [3], "pixel_values": [[[0.1, 0.2], [0.3, 0.4]]], # Example 3D tensor (1x2x2) }, { "prompt_input_ids": [4], "chosen_input_ids": [5], "rejected_input_ids": [6], "pixel_values": [[[0.5, 0.6], [0.7, 0.8]]], # Example 3D tensor (1x2x2) }, ] output = self.collator.torch_call(examples) expected_pixel_values = torch.tensor( [ [[[0.1, 0.2], [0.3, 0.4]]], [[[0.5, 0.6], [0.7, 0.8]]], ] ) # Shape: (2, 1, 2, 2) self.assertTensorEqual(output["pixel_values"], expected_pixel_values)
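# Illustrative sketch (not part of the original tests): because `DataCollatorForPreference` inherits
# from `DataCollatorMixin`, an instance is directly callable (dispatching to `torch_call` for the
# default `return_tensors="pt"`), so it can also serve as a `collate_fn`. The toy features below are
# assumptions for demonstration only.
def example_collate_preference_batch():
    collator = DataCollatorForPreference(pad_token_id=0)
    features = [
        {"prompt_input_ids": [1, 2, 3], "chosen_input_ids": [4, 5], "rejected_input_ids": [6]},
        {"prompt_input_ids": [7, 8], "chosen_input_ids": [9, 10], "rejected_input_ids": [11, 12, 13]},
    ]
    return collator(features)  # dict of padded tensors, as verified in the tests above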
trl/tests/test_collators.py/0
{ "file_path": "trl/tests/test_collators.py", "repo_id": "trl", "token_count": 1448 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys from accelerate.commands.launch import launch_command, launch_command_parser from .scripts.chat import main as chat_main from .scripts.chat import make_parser as make_chat_parser from .scripts.dpo import make_parser as make_dpo_parser from .scripts.env import print_env from .scripts.grpo import make_parser as make_grpo_parser from .scripts.kto import make_parser as make_kto_parser from .scripts.sft import make_parser as make_sft_parser from .scripts.utils import TrlParser def main(): parser = TrlParser(prog="TRL CLI", usage="trl", allow_abbrev=False) # Add the subparsers subparsers = parser.add_subparsers(help="available commands", dest="command", parser_class=TrlParser) # Add the subparsers for every script make_chat_parser(subparsers) make_dpo_parser(subparsers) subparsers.add_parser("env", help="Print the environment information") make_grpo_parser(subparsers) make_kto_parser(subparsers) make_sft_parser(subparsers) # Parse the arguments args = parser.parse_args() if args.command == "chat": (chat_args,) = parser.parse_args_and_config() chat_main(chat_args) if args.command == "dpo": # Get the default args for the launch command dpo_training_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts", "dpo.py") args = launch_command_parser().parse_args([dpo_training_script]) # Feed the args to the launch command args.training_script_args = sys.argv[2:] # remove "trl" and "dpo" launch_command(args) # launch training elif args.command == "env": print_env() elif args.command == "grpo": # Get the default args for the launch command grpo_training_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts", "grpo.py") args = launch_command_parser().parse_args([grpo_training_script]) # Feed the args to the launch command args.training_script_args = sys.argv[2:] # remove "trl" and "grpo" launch_command(args) # launch training elif args.command == "kto": # Get the default args for the launch command kto_training_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts", "kto.py") args = launch_command_parser().parse_args([kto_training_script]) # Feed the args to the launch command args.training_script_args = sys.argv[2:] # remove "trl" and "kto" launch_command(args) # launch training elif args.command == "sft": # Get the default args for the launch command sft_training_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts", "sft.py") args = launch_command_parser().parse_args([sft_training_script]) # Feed the args to the launch command args.training_script_args = sys.argv[2:] # remove "trl" and "sft" launch_command(args) # launch training if __name__ == "__main__": main()
trl/trl/cli.py/0
{ "file_path": "trl/trl/cli.py", "repo_id": "trl", "token_count": 1309 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from contextlib import contextmanager from copy import deepcopy from dataclasses import dataclass from typing import TYPE_CHECKING, Literal, Optional, Union from accelerate.utils import is_deepspeed_available from transformers import PreTrainedModel, PreTrainedTokenizer from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead SUPPORTED_ARCHITECTURES = ( AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, ) if is_deepspeed_available(): import deepspeed if TYPE_CHECKING: from accelerate import Accelerator from deepspeed.runtime.engine import DeepSpeedEngine from torch.nn.parallel.distributed import DistributedDataParallel from .modeling_base import PreTrainedModelWrapper # TODO: Add Abstract Base Class if more formats are added @dataclass class ChatMlSpecialTokens: """Dataclass for special tokens used in ChatML, including system, user, assistant, bos, eos, and pad tokens.""" bos_token: str = "<|im_start|>" eos_token: str = "<|im_end|>" pad_token: str = "<|im_end|>" @property def system(self): return f"{self.bos_token}system" @property def user(self): return f"{self.bos_token}user" @property def assistant(self): return f"{self.bos_token}assistant" @property def chat_template(self): return ( "{% for message in messages %}" f"{{{{'{self.bos_token}' + message['role'] + '\n' + message['content'] + '{self.eos_token}' + '\n'}}}}" "{% endfor %}" "{% if add_generation_prompt %}" f"{{{{ '{self.assistant}\n' }}}}" "{% endif %}" ) FORMAT_MAPPING = {"chatml": ChatMlSpecialTokens} def setup_chat_format( model: PreTrainedModel, tokenizer: PreTrainedTokenizer, format: Optional[Literal["chatml"]] = "chatml", resize_to_multiple_of: Optional[int] = None, ) -> tuple[PreTrainedModel, PreTrainedTokenizer]: """ Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens. If the model already has a chat template, this will throw an error. If you want to overwrite it, please set `tokenizer.chat_template` to `None`. Args: model (`~transformers.PreTrainedModel`): The model to be modified. tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified. format (`Optional[Literal["chatml"]]`): The format to be set. Defaults to "chatml". resize_to_multiple_of (`int` or `None`): Number to resize the embedding layer to. Defaults to None. Returns: model (`~transformers.PreTrainedModel`): The modified model. tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer. """ # check if model already had a chat template if tokenizer.chat_template is not None: raise ValueError( "Chat template is already added to the tokenizer. If you want to overwrite it, please set it to None" ) # check if format available and retrieve if format not in FORMAT_MAPPING: raise ValueError(f"Format {format} not available. 
Please use one of {FORMAT_MAPPING.keys()}") chat_format = FORMAT_MAPPING[format]() # set special tokens and them tokenizer.eos_token = chat_format.eos_token tokenizer.pad_token = chat_format.pad_token tokenizer.bos_token = chat_format.bos_token tokenizer.add_special_tokens({"additional_special_tokens": [chat_format.bos_token, chat_format.eos_token]}) # set chat format for tokenizer tokenizer.chat_template = chat_format.chat_template # resize embedding layer to a multiple of 64, https://x.com/karpathy/status/1621578354024677377 model.resize_token_embeddings( len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None ) # Update the model config to use the new eos & bos tokens if getattr(model, "config", None) is not None: model.config.pad_token_id = tokenizer.pad_token_id model.config.bos_token_id = tokenizer.bos_token_id model.config.eos_token_id = tokenizer.eos_token_id # Update the generation config to use the new eos & bos token if getattr(model, "generation_config", None) is not None: model.generation_config.bos_token_id = tokenizer.bos_token_id model.generation_config.eos_token_id = tokenizer.eos_token_id model.generation_config.pad_token_id = tokenizer.pad_token_id return model, tokenizer def remove_hooks(model: "DeepSpeedEngine") -> None: """Removes the optimizer hooks from a DeepSpeed ZeRO-3 model.""" if not hasattr(model, "optimizer"): # before the first training step, the model has no optimizer return if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"): optimizer_offload = model.optimizer.parameter_offload elif model.optimizer is not None: optimizer_offload = model.optimizer for param in iter_params(optimizer_offload.module, recurse=True): param.ds_active_sub_modules.clear() for hook in optimizer_offload.forward_hooks: hook.remove() for hook in optimizer_offload.backward_hooks: hook.remove() optimizer_offload.forward_hooks = [] optimizer_offload.backward_hooks = [] def get_all_parameters(sub_module, recurse=False): return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters()) def iter_params(module, recurse=False): return [param for _, param in get_all_parameters(module, recurse)] def add_hooks(model: "DeepSpeedEngine") -> None: """Adds the optimizer hooks from a DeepSpeed ZeRO-3 model.""" if not hasattr(model, "optimizer"): # before the first training step, the model has no optimizer return if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"): optimizer_offload = model.optimizer.parameter_offload elif model.optimizer is not None: optimizer_offload = model.optimizer optimizer_offload._register_hooks_recursively(optimizer_offload.module) @contextmanager def unwrap_model_for_generation( model: Union["DistributedDataParallel", "DeepSpeedEngine"], accelerator: "Accelerator", is_peft_model: bool = False, gather_deepspeed3_params: bool = True, ) -> Union["PreTrainedModelWrapper", "DeepSpeedEngine"]: """Context manager to unwrap a model for generation. For ZeRO-3 models, we gather the weights once to speed up generation. 
""" unwrapped_model = accelerator.unwrap_model(model) if is_peft_model: unwrapped_model.pretrained_model.disable_adapter() if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3: if not gather_deepspeed3_params: yield accelerator.unwrap_model(model) else: with deepspeed.zero.GatheredParameters(model.parameters()): remove_hooks(model) yield accelerator.unwrap_model(model) add_hooks(model) else: yield unwrapped_model def prepare_deepspeed(model, accelerator): # Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473 deepspeed_plugin = accelerator.state.deepspeed_plugin config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config) stage = config_kwargs["zero_optimization"]["stage"] if model is not None: hidden_size = ( max(model.config.hidden_sizes) if getattr(model.config, "hidden_sizes", None) else getattr(model.config, "hidden_size", None) ) if hidden_size is not None and stage == 3: # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache # @ step 0: expected module 1, but got module 0` # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081 config_kwargs.update( { "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, } ) # If ZeRO-3 is used, we shard both the active and reference model. # Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO # disabled (stage 0) if stage != 3: config_kwargs["zero_optimization"]["stage"] = 0 model, *_ = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model
trl/trl/models/utils.py/0
{ "file_path": "trl/trl/models/utils.py", "repo_id": "trl", "token_count": 3540 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from transformers import TrainingArguments @dataclass class CPOConfig(TrainingArguments): r""" Configuration class for the [`CPOTrainer`]. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: learning_rate (`float`, *optional*, defaults to `1e-6`): Initial learning rate for [`AdamW`] optimizer. The default value replaces that of [`~transformers.TrainingArguments`]. max_length (`int` or `None`, *optional*, defaults to `1024`): Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want to use the default data collator. max_prompt_length (`int` or `None`, *optional*, defaults to `512`): Maximum length of the prompt. This argument is required if you want to use the default data collator. max_completion_length (`int` or `None`, *optional*, defaults to `None`): Maximum length of the completion. This argument is required if you want to use the default data collator and your model is an encoder-decoder. beta (`float`, *optional*, defaults to `0.1`): Parameter controlling the deviation from the reference model. Higher β means less deviation from the reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in the [paper](https://huggingface.co/papers/2310.12036). label_smoothing (`float`, *optional*, defaults to `0.0`): Label smoothing factor. This argument is required if you want to use the default data collator. loss_type (`str`, *optional*, defaults to `"sigmoid"`): Type of loss to use. Possible values are: - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper. - `"hinge"`: hinge loss on the normalized likelihood from the [SLiC](https://huggingface.co/papers/2305.10425) paper. - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper. - `"simpo"`: SimPO loss from the [SimPO](https://huggingface.co/papers/2405.14734) paper. disable_dropout (`bool`, *optional*, defaults to `True`): Whether to disable dropout in the model. cpo_alpha (`float`, *optional*, defaults to `1.0`): Weight of the BC regularizer in CPO training. simpo_gamma (`float`, *optional*, defaults to `0.5`): Target reward margin for the SimPO loss, used only when the `loss_type="simpo"`. label_pad_token_id (`int`, *optional*, defaults to `-100`): Label pad token id. This argument is required if you want to use the default data collator. padding_value (`int` or `None`, *optional*, defaults to `None`): Padding value to use. If `None`, the padding value of the tokenizer is used. truncation_mode (`str`,*optional*, defaults to `"keep_end"`): Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`. 
This argument is required if you want to use the default data collator. generate_during_eval (`bool`, *optional*, defaults to `False`): If `True`, generates and logs completions from the model to W&B or Comet during evaluation. is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`): When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument, you need to specify if the model returned by the callable is an encoder-decoder model. model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`): Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a string. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of processes to use for processing the dataset. """ learning_rate: float = field( default=1e-6, metadata={ "help": "Initial learning rate for `AdamW` optimizer. The default value replaces that of " "`transformers.TrainingArguments`." }, ) max_length: Optional[int] = field( default=1024, metadata={"help": "Maximum length of the sequences (prompt + completion) in the batch."}, ) max_prompt_length: Optional[int] = field( default=512, metadata={ "help": "Maximum length of the prompt. This argument is required if you want to use the default data " "collator and your model is an encoder-decoder." }, ) max_completion_length: Optional[int] = field( default=None, metadata={ "help": "Maximum length of the completion. This argument is required if you want to use the default data " "collator and your model is an encoder-decoder." }, ) beta: float = field( default=0.1, metadata={ "help": "Parameter controlling the deviation from the reference model. Higher β means less deviation from " "the reference model." }, ) label_smoothing: float = field( default=0.0, metadata={"help": "Label smoothing factor."}, ) loss_type: str = field( default="sigmoid", metadata={ "help": "Type of loss to use.", "choices": ["sigmoid", "hinge", "ipo", "simpo"], }, ) disable_dropout: bool = field( default=True, metadata={"help": "Whether to disable dropout in the model."}, ) cpo_alpha: float = field( default=1.0, metadata={"help": "Weight of the BC regularizer in CPO training."}, ) simpo_gamma: float = field( default=0.5, metadata={"help": "Target reward margin for the SimPO loss, used only when the `loss_type='simpo'`."}, ) label_pad_token_id: int = field( default=-100, metadata={"help": "Label pad token id."}, ) padding_value: Optional[int] = field( default=None, metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."}, ) truncation_mode: str = field( default="keep_end", metadata={ "help": "Truncation mode to use when the prompt is too long.", "choices": ["keep_end", "keep_start"], }, ) generate_during_eval: bool = field( default=False, metadata={"help": "If `True`, generates and logs completions from the model to W&B during evaluation."}, ) is_encoder_decoder: Optional[bool] = field( default=None, metadata={"help": "Whether the model is an encoder-decoder model."}, ) model_init_kwargs: Optional[dict[str, Any]] = field( default=None, metadata={ "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model " "from a string." }, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of processes to use for processing the dataset."}, )
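# Illustrative sketch (an assumption, not part of the original module): a SimPO-style configuration.
# With `loss_type="simpo"`, setting `cpo_alpha=0.0` drops the BC regularizer; the output directory
# and hyperparameter values below are placeholders for demonstration only.
def example_simpo_config(output_dir: str = "cpo-simpo-example") -> CPOConfig:
    return CPOConfig(
        output_dir=output_dir,
        loss_type="simpo",
        cpo_alpha=0.0,
        simpo_gamma=0.5,
        beta=0.1,
    )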
trl/trl/trainer/cpo_config.py/0
{ "file_path": "trl/trl/trainer/cpo_config.py", "repo_id": "trl", "token_count": 3110 }
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import textwrap
from typing import Any, Callable, Optional, Union

import jinja2
import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import Dataset, IterableDataset
from transformers import (
    BaseImageProcessor,
    FeatureExtractionMixin,
    PreTrainedModel,
    PreTrainedTokenizerBase,
    ProcessorMixin,
    TrainerCallback,
    is_wandb_available,
)
from transformers.trainer_utils import EvalPrediction
from transformers.training_args import OptimizerNames
from transformers.utils import is_apex_available

from ..data_utils import is_conversational, maybe_apply_chat_template
from ..models.modeling_base import GeometricMixtureWrapper
from ..models.utils import unwrap_model_for_generation
from .judges import BasePairwiseJudge
from .nash_md_config import NashMDConfig
from .online_dpo_trainer import OnlineDPOTrainer
from .utils import (
    SIMPLE_CHAT_TEMPLATE,
    empty_cache,
    generate_model_card,
    get_comet_experiment_url,
    get_reward,
    selective_log_softmax,
    truncate_right,
)


if is_apex_available():
    from apex import amp


if is_wandb_available():
    import wandb


class NashMDTrainer(OnlineDPOTrainer):
    r"""
    Initialize NashMDTrainer as a subclass of [`OnlineDPOTrainer`].

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
        ref_model (`PreTrainedModelWrapper`):
            Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation
            and loss. If no reference model is provided, the trainer will create a reference model with the same
            architecture as the model to be optimized.
        reward_model (`transformers.PreTrainedModel`):
            The reward model to score completions with, preferably an `AutoModelForSequenceClassification`.
        judge (`BasePairwiseJudge`):
            The judge to use for pairwise comparison of model completions.
        args (`NashMDConfig`):
            The NashMD config arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator
            (`DPODataCollatorWithPadding`) will be used, which will pad the sequences to the maximum length of the
            sequences in the batch, given a dataset of paired sequences.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
        peft_config (`dict`):
            The peft config to use for training.
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take an `EvalPrediction` and return a dictionary mapping
            metric names to metric values.
callbacks (`list[transformers.TrainerCallback]`): The callbacks to use for training. optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. """ _tag_names = ["trl", "nash-md"] def __init__( self, model: Union[PreTrainedModel, nn.Module] = None, ref_model: Union[PreTrainedModel, nn.Module] = None, reward_model: Union[PreTrainedModel, nn.Module, None] = None, judge: Optional[BasePairwiseJudge] = None, args: Optional[NashMDConfig] = None, data_collator: Optional[Callable] = None, train_dataset: Optional[Union[Dataset, IterableDataset]] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, peft_config: Optional[dict] = None, compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, ) -> None: super().__init__( model=model, ref_model=ref_model, reward_model=reward_model, judge=judge, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, reward_processing_class=processing_class, # for now, NashMDTrainer can't use any reward model peft_config=peft_config, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) self._mixture_coef = self.args.mixture_coef # Overwrite the stats dictionary to include NashMD specific statistics self.stats = { # Remove "non_score_reward", "rlhf_reward", "scores_margin" # Add "mixture_coef" "loss/kl": [], "objective/entropy": [], "loss/score": [], "rewards/probabilities": [], "rewards/accuracies": [], "rewards/margins": [], "logps/chosen": [], "logps/rejected": [], "val/model_contain_eos_token": [], "val/ref_contain_eos_token": [], "beta": [], "mixture_coef": [], } if self.reward_model is not None: self.stats["rewards/chosen"] = [] self.stats["rewards/rejected"] = [] @property def mixture_coef(self): if isinstance(self._mixture_coef, list): epoch = self.state.epoch return self._mixture_coef[epoch] if epoch < len(self._mixture_coef) else self._mixture_coef[-1] else: return self._mixture_coef def _generate_completions(self, model, prompts): with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: model_output = unwrapped_model.generate( input_ids=prompts["input_ids"], attention_mask=prompts["attention_mask"], generation_config=self.generation_config, ) ref_model = model if self.ref_model is None else self.ref_model with torch.no_grad(), unwrap_model_for_generation(ref_model, self.accelerator) as unwrapped_ref_model: mixture_model = GeometricMixtureWrapper( model=unwrapped_model, ref_model=unwrapped_ref_model, generation_config=self.generation_config, mixture_coef=self.mixture_coef, device=self.accelerator.device, ) mixture_output = mixture_model.generate( input_ids=prompts["input_ids"], attention_mask=prompts["attention_mask"], generation_config=self.generation_config, ) return model_output, mixture_output def _process_completions(self, model_output, 
mixture_output, prompts): context_length = prompts["input_ids"].shape[1] # Process model completions model_completion_ids = model_output[:, context_length:] model_completion_ids, model_completion_mask = truncate_right( model_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id ) model_data = { "input_ids": torch.cat((prompts["input_ids"], model_completion_ids), dim=1), "attention_mask": torch.cat((prompts["attention_mask"], model_completion_mask), dim=1), "raw": prompts["raw"], } # Process reference model completions mixture_completion_ids = mixture_output[:, context_length:] mixture_completion_ids, mixture_completion_mask = truncate_right( mixture_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id ) mixture_data = { "input_ids": torch.cat((prompts["input_ids"], mixture_completion_ids), dim=1), "attention_mask": torch.cat((prompts["attention_mask"], mixture_completion_mask), dim=1), "raw": prompts["raw"], } return model_data, mixture_data def _compute_rewards(self, model_data, mixture_data, context_length): with torch.no_grad(): _, model_scores, _ = get_reward( self.reward_model, model_data["input_ids"], self.processing_class.pad_token_id, context_length ) _, mixture_scores, _ = get_reward( self.reward_model, mixture_data["input_ids"], self.processing_class.pad_token_id, context_length ) # Apply EOS penalty if needed if self.args.missing_eos_penalty is not None: model_contain_eos = torch.any(model_data["input_ids"] == self.processing_class.eos_token_id, dim=-1) mixture_contain_eos = torch.any(mixture_data["input_ids"] == self.processing_class.eos_token_id, dim=-1) model_scores[~model_contain_eos] -= self.args.missing_eos_penalty mixture_scores[~mixture_contain_eos] -= self.args.missing_eos_penalty return model_scores, mixture_scores def _compute_judge(self, model_data, mixture_data, context_length): prompts = model_data["raw"] model_data_completions = self.processing_class.batch_decode( model_data["input_ids"][:, context_length:], skip_special_tokens=True ) model_data_completions = [completion.strip() for completion in model_data_completions] mixture_data_completions = self.processing_class.batch_decode( mixture_data["input_ids"][:, context_length:], skip_special_tokens=True ) mixture_data_completions = [completion.strip() for completion in mixture_data_completions] if is_conversational({"prompt": prompts[0]}): model_data_completions = [ [{"role": "assistant", "content": completion}] for completion in model_data_completions ] environment = jinja2.Environment() template = environment.from_string(SIMPLE_CHAT_TEMPLATE) prompts = [template.render(messages=message) for message in prompts] model_data_completions = [template.render(messages=completion) for completion in model_data_completions] mixture_data_completions = [ [{"role": "assistant", "content": completion}] for completion in mixture_data_completions ] mixture_data_completions = [ template.render(messages=completion) for completion in mixture_data_completions ] probability = self.judge.judge( prompts, list(zip(model_data_completions, mixture_data_completions)), return_scores=True, ) return torch.tensor(probability, device=model_data["input_ids"].device) def _compute_logprobs(self, model, model_data, context_length): def compute_logprobs_for_data(m, data): output = m(data["input_ids"], attention_mask=data["attention_mask"]) logits = output.logits[:, context_length - 1 : -1] token_logprobs = selective_log_softmax(logits, data["input_ids"][:, context_length:]) return 
token_logprobs # Compute logprobs for model completions under the model model_logprobs_model_data = compute_logprobs_for_data(model, model_data) # Compute logprobs of model completions under the reference model with torch.no_grad(): if self.ref_model is None: with model.disable_adapter(): ref_logprobs_model_data = compute_logprobs_for_data(model, model_data) else: ref_logprobs_model_data = compute_logprobs_for_data(self.ref_model, model_data) # Mask padding tokens model_padding_mask = model_data["attention_mask"][:, context_length:] == 0 model_logprobs_model_data = model_logprobs_model_data.masked_fill(model_padding_mask, 0.0) ref_logprobs_model_data = ref_logprobs_model_data.masked_fill(model_padding_mask, 0.0) return (model_logprobs_model_data, ref_logprobs_model_data) def _compute_losses( self, model_logprobs_model_data, ref_logprobs_model_data, probability, ): # reinforce score where 0.5 is a control variate score = (probability - 0.5) * model_logprobs_model_data.sum(1) # kl divergence via reinforce with torch.no_grad(): log_ratio = model_logprobs_model_data - ref_logprobs_model_data kl_div_log = log_ratio.sum(1) kl_div_loss = (log_ratio * model_logprobs_model_data).sum(1) # final loss loss = self.beta * kl_div_loss - score return loss.mean(), score, kl_div_log def _log_statistics( self, model_data, mixture_data, model_logprobs_model_data, ref_logprobs_model_data, probability, score, kl_div, context_length, model_scores=None, mixture_scores=None, ): # Helper function to gather and compute mean def gather_mean(tensor): return self.accelerator.gather_for_metrics(tensor).mean().item() # Log score self.stats["loss/score"].append(gather_mean(score)) # Log KL divergence self.stats["loss/kl"].append(gather_mean(kl_div)) # Log logprobs model_logprobs_model_data_sum = model_logprobs_model_data.sum(1) ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1) self.stats["logps/chosen"].append(gather_mean(model_logprobs_model_data_sum)) self.stats["logps/rejected"].append(gather_mean(ref_logprobs_model_data_sum)) # Log rewards if self.reward_model is not None: self.stats["rewards/chosen"].append(gather_mean(model_scores)) self.stats["rewards/rejected"].append(gather_mean(mixture_scores)) # Log probabilities self.stats["rewards/probabilities"].append(gather_mean(probability)) # Calculate entropy for model data entropy_model_data = -model_logprobs_model_data.sum(1) self.stats["objective/entropy"].append(gather_mean(entropy_model_data)) # Calculate margins margin = model_logprobs_model_data_sum - ref_logprobs_model_data_sum self.stats["rewards/margins"].append(gather_mean(margin)) # Calculate accuracy accuracy = (margin > 0).float() self.stats["rewards/accuracies"].append(gather_mean(accuracy)) # Log EOS token statistics model_eos = (model_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1) mixture_eos = (mixture_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1) self.stats["val/model_contain_eos_token"].append(gather_mean(model_eos.float())) self.stats["val/ref_contain_eos_token"].append(gather_mean(mixture_eos.float())) # Log beta and mixture coef self.stats["beta"].append(self.beta) self.stats["mixture_coef"].append(self.mixture_coef) def training_step( self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None ) -> torch.Tensor: model.train() # Apply chat template and tokenize the input batch_size = len(next(iter(inputs.values()))) prompts = inputs["prompt"] inputs = [{k: 
v[i] for k, v in inputs.items()} for i in range(batch_size)] inputs = [maybe_apply_chat_template(x, self.processing_class) for x in inputs] inputs = [self.tokenize_row(x, self.model.config.is_encoder_decoder, self.processing_class) for x in inputs] inputs = self.data_collator(inputs) # need the prompt_ only inputs = self._prepare_inputs(inputs) context_length = inputs["prompt_input_ids"].shape[1] prompts = { "input_ids": inputs["prompt_input_ids"], "attention_mask": inputs["prompt_attention_mask"], "raw": prompts, } del inputs # Sample completions from both the model and the reference model model_output, mixture_output = self._generate_completions(model, prompts) # Process model completions model_data, mixture_data = self._process_completions(model_output, mixture_output, prompts) # Compute rewards if self.reward_model is not None: model_scores, mixture_scores = self._compute_rewards(model_data, mixture_data, context_length) # probability of the model data vs the mixture data probability = F.sigmoid(model_scores - mixture_scores) else: model_scores, mixture_scores = None, None probability = self._compute_judge(model_data, mixture_data, context_length) # Compute logprobs model_logprobs_model_data, ref_logprobs_model_data = self._compute_logprobs(model, model_data, context_length) # Compute loss loss, score, kl_div = self._compute_losses(model_logprobs_model_data, ref_logprobs_model_data, probability) # Log everything self._log_statistics( model_data, mixture_data, model_logprobs_model_data.detach(), ref_logprobs_model_data, probability, score.detach(), kl_div.detach(), context_length, model_scores, mixture_scores, ) if ( self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0 ): empty_cache() kwargs = {} # For LOMO optimizers you need to explicitly use the learning rate if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: kwargs["learning_rate"] = self._get_learning_rate() if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss, **kwargs) return loss.detach() / self.args.gradient_accumulation_steps def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @inproceedings{munos2024nash, title = {{Nash Learning from Human Feedback}}, author = {R{\'{e}}mi Munos and Michal Valko and Daniele Calandriello and Mohammad Gheshlaghi Azar and Mark Rowland and Zhaohan Daniel Guo and Yunhao Tang and Matthieu Geist and Thomas Mesnard and C{\\^{o}}me Fiegel and Andrea Michi and Marco Selvi and Sertan Girgin and Nikola Momchev and Olivier Bachem and Daniel J. Mankowitz and Doina Precup and Bilal Piot}, year = 2024, booktitle = {Forty-first International Conference on Machine Learning, {ICML} 2024, Vienna, Austria, July 21-27, 2024}, publisher = {OpenReview.net}, url = {https://openreview.net/forum?id=Y5AmNYiyCQ} }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="Nash-MD", trainer_citation=citation, paper_title="Nash Learning from Human Feedback", paper_id="2312.00886", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/nash_md_trainer.py/0
{ "file_path": "trl/trl/trainer/nash_md_trainer.py", "repo_id": "trl", "token_count": 9816 }
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

from trl.trainer.online_dpo_config import OnlineDPOConfig


@dataclass
class XPOConfig(OnlineDPOConfig):
    r"""
    Configuration class for the [`XPOTrainer`].

    A subclass of [`OnlineDPOConfig`]; it inherits all of its arguments and adds the following:

    Parameters:
        alpha (`float` or `list[float]`, *optional*, defaults to `1e-5`):
            Weight of the XPO loss term. If a list of floats is provided, the alpha is selected for each new epoch and
            the last alpha is used for the rest of the epochs.
    """

    alpha: list[float] = field(
        default_factory=lambda: [1e-5],
        metadata={
            "help": "Weight of the XPO loss term. If a list of floats is provided then the alpha is selected for each "
            "new epoch and the last alpha is used for the rest of the epochs."
        },
    )

    def __post_init__(self):
        super().__post_init__()
        if hasattr(self.alpha, "__len__") and len(self.alpha) == 1:
            self.alpha = self.alpha[0]
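# Illustrative sketch (an assumption, not part of the original module): scheduling a larger alpha for
# the first epoch and a smaller one afterwards; per `__post_init__`, a single-element list collapses
# to a float. The output directory is a placeholder.
def example_xpo_config(output_dir: str = "xpo-example") -> XPOConfig:
    return XPOConfig(output_dir=output_dir, alpha=[1e-5, 5e-6])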
trl/trl/trainer/xpo_config.py/0
{ "file_path": "trl/trl/trainer/xpo_config.py", "repo_id": "trl", "token_count": 566 }
<!---
Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Official Hugging Face Accelerate Docker Images

Accelerate publishes a variety of docker versions as part of our CI that users can also use. These are stable images that Accelerate can run off of, each of which comes with a different setup configuration, and all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).

A breakdown of each is given below.

## Naming Conventions

Accelerate docker images follow a tagging convention of:

```bash
huggingface/accelerate:{accelerator}-{nightly,release}
```

`accelerator` in this instance is one of several applicable pre-configured backends:

* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes`. Runs off Python 3.9.
* `cpu`: Comes compiled off of `python:3.9-slim` and is designed for non-CUDA based workloads.
* `gpu-deepspeed`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes` as well as the latest `deepspeed` version. Runs off Python 3.10.
* `gpu-fp8-transformerengine`: Comes compiled off of `nvcr.io/nvidia/pytorch` and is specifically for running the `benchmarks/fp8` scripts on devices which support FP8 operations using the `TransformerEngine` library (RTX 4090, H100, etc.)
* More to come soon

## Nightlies vs Releases

With each release, a new build is pushed with the version number included in the name. For a GPU-supported image of version 0.28.0, for instance, it would look like the following:

```bash
huggingface/accelerate:gpu-release-0.28.0
```

Nightlies contain two different image tags: a general `nightly` tag, which is built each night, and a `nightly-YYYY-MM-DD` tag, which corresponds to a build from a particular date.

For instance, here is an example nightly CPU image from 3/14/2024:

```bash
huggingface/accelerate:cpu-nightly-2024-03-14
```

## Running the images

Each image comes compiled with `conda` and an `accelerate` environment that contains all of the installed dependencies.

To pull down the latest nightly, run:

```bash
docker pull huggingface/accelerate:gpu-nightly
```

To then run it in interactive mode with GPU memory available, run:

```bash
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
```

## DEPRECATED IMAGES

CPU and GPU docker images were previously hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will not receive updates.

The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` contain the same `Dockerfile`, so it's as simple as changing the docker image to the desired ones from above. We will not be deleting these images for posterity, but they will not be receiving updates going forward.
accelerate/docker/README.md/0
{ "file_path": "accelerate/docker/README.md", "repo_id": "accelerate", "token_count": 916 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Executing and deferring jobs When you run your usual script, instructions are executed in order. Using Accelerate to deploy your script on several GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be faster than others. You might need to wait for all processes to have reached a certain point before executing a given instruction. For instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to continue training before all the model weights have been loaded in. To do this, just write the following line in your code: ``` accelerator.wait_for_everyone() ``` This instruction will block all the processes that arrive first until all the other processes have reached that point (if you run your script on just one GPU or CPU, this won't do anything). A few example cases of when to use this utility are listed below: <Tip> Some of these are utilized with the [`~Accelerator.main_process_first`] context manager, which utilizes [`~Accelerator.wait_for_everyone`] to run a particular set of code on the main process beforehand before triggering and launching the other processes </Tip> ## Downloading a Dataset When downloading a dataset, you should download it first on the main process and then load the cached dataset afterward <Tip> `load_dataset` will perform a lock under the hood to stop multiple downloads from happening at once, but if you are downloading something not using this library you should use this method. </Tip> ```python with accelerator.main_process_first(): datasets = load_dataset("glue", "mrpc") ``` Under the hood this is the same as calling: ```python # First do something on the main process if accelerator.is_main_process: datasets = load_dataset("glue", "mrpc") else: accelerator.wait_for_everyone() # And then send it to the rest of them if not accelerator.is_main_process: datasets = load_dataset("glue", "mrpc") else: accelerator.wait_for_everyone() ``` ## Saving the `state_dict` When saving the `state_dict` of the model, since you would normally save one file on just the main process you should specify that: ```python if accelerator.is_main_process: model = accelerator.unwrap_model(model) torch.save(model.state_dict(), "weights.pth") ``` ## Loading in the `state_dict` When loading in the `state_dict` to a model, optimizer, or scheduler, you should wait for all workers to have the weights loaded in before moving on to training ```python with accelerator.main_process_first(): state = torch.load("weights.pth") model.load_state_dict(state) ``` ## Applying a multi-worker CPU operation Applying a `map()` operation on multiple workers, such as tokenizing should be done on the main process first, and then propagated to each one. 
```python datasets = load_dataset("glue", "mrpc") with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) ``` ## Applying checks such as Early Stopping To have a check that works with a flag set by a particular process, the `set_trigger` and `check_trigger` API should be used. Useful examples for doing so can include situations such as using early stopping and monitoring the loss (as each loss slightly differs on each process). Call [`Accelerator.set_trigger`] when your condition has been met, and [`Accelerator.check_trigger`] when checking if that condition has been met in any process: ```python for (x,y) in data_loader: logits = model(x) loss = loss_func(logits, y) # Assume `should_do_early_stopping` is a custom defined function that returns a conditional if should_do_early_stopping(loss): accelerator.set_trigger() # Later in the training script when we need to check for the breakpoint if accelerator.check_trigger(): break ```
accelerate/docs/source/concept_guides/deferring_execution.md/0
{ "file_path": "accelerate/docs/source/concept_guides/deferring_execution.md", "repo_id": "accelerate", "token_count": 1349 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Distributed inference Distributed inference can fall into three brackets: 1. Loading an entire model onto each GPU and sending chunks of a batch through each GPU's model copy at a time 2. Loading parts of a model onto each GPU and processing a single input at one time 3. Loading parts of a model onto each GPU and using what is called scheduled Pipeline Parallelism to combine the two prior techniques. We're going to go through the first and the last bracket, showcasing how to do each as they are more realistic scenarios. ## Sending chunks of a batch automatically to each loaded model This is the most memory-intensive solution, as it requires each GPU to keep a full copy of the model in memory at a given time. Normally when doing this, users send the model to a specific device to load it from the CPU, and then move each prompt to a different device. A basic pipeline using the `diffusers` library might look something like so: ```python import torch import torch.distributed as dist from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) ``` Followed then by performing inference based on the specific prompt: ```python def run_inference(rank, world_size): dist.init_process_group("nccl", rank=rank, world_size=world_size) pipe.to(rank) if torch.distributed.get_rank() == 0: prompt = "a dog" elif torch.distributed.get_rank() == 1: prompt = "a cat" result = pipe(prompt).images[0] result.save(f"result_{rank}.png") ``` One will notice how we have to check the rank to know what prompt to send, which can be a bit tedious. A user might then also think that with Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be a simple way to manage this. (To learn more, check out the relevant section in the [Quick Tour](../quicktour#distributed-evaluation)) Can it manage it? Yes. Does it add unneeded extra code however: also yes. With Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`). This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with a potential to be padded) for you to use right away. 
Let's rewrite the above example using this context manager:

```python
import torch
from accelerate import PartialState  # Can also be Accelerator or AcceleratorState
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)

# Assume two processes
with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt:
    result = pipe(prompt).images[0]
    result.save(f"result_{distributed_state.process_index}.png")
```

And then to launch the code, we can use Accelerate:

If you have generated a config file with `accelerate config`:

```bash
accelerate launch distributed_inference.py
```

If you have a specific config file you want to use:

```bash
accelerate launch --config_file my_config.json distributed_inference.py
```

Or if you don't want to make any config files and launch on two GPUs:

> Note: You will get some warnings about values being guessed based on your system. To remove these you can do `accelerate config default` or go through `accelerate config` to create a config file.

```bash
accelerate launch --num_processes 2 distributed_inference.py
```

We've now reduced the boilerplate code needed to split this data to just a few lines of code.

But what if we have an odd distribution of prompts to GPUs? For example, what if we have 3 prompts, but only 2 GPUs?

Under the context manager, the first GPU would receive the first two prompts and the second GPU the third, ensuring that all prompts are split and no overhead is needed.

*However*, what if we then wanted to do something with the results of *all the GPUs*? (Say gather them all and perform some kind of post processing)

You can pass in `apply_padding=True` to ensure that the lists of prompts are padded to the same length, with extra data being taken from the last sample. This way all GPUs will have the same number of prompts, and you can then gather the results.

<Tip>

This is only needed when trying to perform an action such as gathering the results, where the data on each
device needs to be the same length. Basic inference does not require this.

</Tip>

For instance:

```python
import torch
from accelerate import PartialState  # Can also be Accelerator or AcceleratorState
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)

# Assume two processes
with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt:
    result = pipe(prompt).images
```

On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`.
Make sure to drop the final sample, as it will be a duplicate of the previous one.

You can find more complex examples [here](https://github.com/huggingface/accelerate/tree/main/examples/inference/distributed) such as how to use it with LLMs.

## Memory-efficient pipeline parallelism (experimental)

This next part will discuss using *pipeline parallelism*. This is an **experimental** API that utilizes [torch.distributed.pipelining](https://pytorch.org/docs/stable/distributed.pipelining.html#) as a native solution.

The general idea with pipeline parallelism is: say you have 4 GPUs and a model big enough it can be *split* on four GPUs using `device_map="auto"`.
With this method you can send in 4 inputs at a time (for example here, any amount works) and each model chunk will work on an input, then receive the next input once the prior chunk finished, making it *much* more efficient **and faster** than the method described earlier. Here's a visual taken from the PyTorch repository: ![Pipeline parallelism example](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/pipeline_parallel.png) To illustrate how you can use this with Accelerate, we have created an [example zoo](https://github.com/huggingface/accelerate/tree/main/examples/inference) showcasing a number of different models and situations. In this tutorial, we'll show this method for GPT2 across two GPUs. Before you proceed, please make sure you have the latest PyTorch version installed by running the following: ```bash pip install torch ``` Start by creating the model on the CPU: ```{python} from transformers import GPT2ForSequenceClassification, GPT2Config config = GPT2Config() model = GPT2ForSequenceClassification(config) model.eval() ``` Next you'll need to create some example inputs to use. These help `torch.distributed.pipelining` trace the model. <Tip warning={true}> However you make this example will determine the relative batch size that will be used/passed through the model at a given time, so make sure to remember how many items there are! </Tip> ```{python} input = torch.randint( low=0, high=config.vocab_size, size=(2, 1024), # bs x seq_len device="cpu", dtype=torch.int64, requires_grad=False, ) ``` Next we need to actually perform the tracing and get the model ready. To do so, use the [`inference.prepare_pippy`] function and it will fully wrap the model for pipeline parallelism automatically: ```{python} from accelerate.inference import prepare_pippy example_inputs = {"input_ids": input} model = prepare_pippy(model, example_args=(input,)) ``` <Tip> There are a variety of parameters you can pass through to `prepare_pippy`: * `split_points` lets you determine what layers to split the model at. By default we use wherever `device_map="auto" declares, such as `fc` or `conv1`. * `num_chunks` determines how the batch will be split and sent to the model itself (so `num_chunks=1` with four split points/four GPUs will have a naive MP where a single input gets passed between the four layer split points) </Tip> From here, all that's left is to actually perform the distributed inference! <Tip warning={true}> When passing inputs, we highly recommend to pass them in as a tuple of arguments. Using `kwargs` is supported, however, this approach is experimental. </Tip> ```{python} args = some_more_arguments with torch.no_grad(): output = model(*args) ``` When finished all the data will be on the last process only: ```{python} from accelerate import PartialState if PartialState().is_last_process: print(output) ``` <Tip> If you pass in `gather_output=True` to [`inference.prepare_pippy`], the output will be sent across to all the GPUs afterwards without needing the `is_last_process` check. This is `False` by default as it incurs a communication call. </Tip> And that's it! To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference/pippy) and our [documentation](../package_reference/inference) as we work to improving this integration.
accelerate/docs/source/usage_guides/distributed_inference.md/0
{ "file_path": "accelerate/docs/source/usage_guides/distributed_inference.md", "repo_id": "accelerate", "token_count": 2864 }
# What are these scripts?

All scripts in this folder originate from the `nlp_example.py` file, as it is a very simplistic NLP training example using Accelerate with zero extra features.

From there, each further script adds in just **one** feature of Accelerate, showing how you can quickly modify your own scripts to implement these capabilities.

A full example with all of these parts integrated together can be found in the `complete_nlp_example.py` script and `complete_cv_example.py` script.

Adjustments to each script from the base `nlp_example.py` file can be found quickly by searching for "# New Code #".

## Example Scripts by Feature and their Arguments

### Base Example (`../nlp_example.py`)

- Shows how to use `Accelerator` in an extremely simplistic PyTorch training loop
- Arguments available:
  - `mixed_precision`, whether to use mixed precision. ("no", "fp16", or "bf16")
  - `cpu`, whether to train using only the CPU. (yes/no/1/0)

All following scripts also accept these arguments in addition to their added ones.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:

```bash
accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0
```

### Checkpointing and Resuming Training (`checkpointing.py`)

- Shows how to use `Accelerator.save_state` and `Accelerator.load_state` to save or continue training
- **It is assumed you are continuing off the same training script**
- Arguments available:
  - `checkpointing_steps`, after how many steps the various states should be saved. ("epoch", 1, 2, ...)
  - `output_dir`, where saved state folders should be saved to, default is current working directory
  - `resume_from_checkpoint`, what checkpoint folder to resume from. ("epoch_0", "step_22", ...)

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as:

(Note, `resume_from_checkpoint` assumes that we've run the script for one epoch with the `--checkpointing_steps epoch` flag)

```bash
accelerate launch ./checkpointing.py --checkpointing_steps epoch --output_dir "checkpointing_tutorial" --resume_from_checkpoint "checkpointing_tutorial/epoch_0"
```

### Cross Validation (`cross_validation.py`)

- Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`.
- Arguments available:
  - `num_folds`, the number of folds the training dataset should be split into.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as:

```bash
accelerate launch ./cross_validation.py --num_folds 2
```

### Experiment Tracking (`tracking.py`)

- Shows how to use `Accelerator.init_trackers` and `Accelerator.log`
- Can be used with Weights and Biases, TensorBoard, or CometML.
- Arguments available:
  - `with_tracking`, whether to load in all available experiment trackers from the environment.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as:

```bash
accelerate launch ./tracking.py --with_tracking
```

### Gradient Accumulation (`gradient_accumulation.py`)

- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup.
- Arguments available: - `gradient_accumulation_steps`, the number of steps to perform before the gradients are accumulated and the optimizer and scheduler are stepped + zero_grad These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./gradient_accumulation.py --gradient_accumulation_steps 5 ``` ### LocalSGD (`local_sgd.py`) - Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup. However, unlike gradient accumulation, this method does not change the effective batch size. Local SGD can be combined with gradient accumulation. These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./local_sgd.py --local_sgd_steps 4 ``` ### DDP Communication Hook (`ddp_comm_hook.py`) - Shows how to use DDP Communication Hooks to control and optimize gradient communication across workers in a DistributedDataParallel setup. - Arguments available: - `ddp_comm_hook`, the type of DDP communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`. These arguments should be added at the end of any method for starting the python script (such as `accelerate launch`, `python -m torch.distributed.run`), such as: ```bash accelerate launch ./ddp_comm_hook.py --mixed_precision fp16 --ddp_comm_hook power_sgd ``` ### Profiler (`profiler.py`) - Shows how to use the profiling capabilities of `Accelerate` to profile PyTorch models during training. - Uses the `ProfileKwargs` handler to customize profiling options, including activities, scheduling, and additional profiling options. - Can generate and save profiling traces in JSON format for visualization in Chrome's tracing tool. Arguments available: - `--record_shapes`: If passed, records shapes for profiling. - `--profile_memory`: If passed, profiles memory usage. - `--with_stack`: If passed, profiles stack traces. - `--with_flops`: If passed, profiles floating point operations (FLOPS). - `--output_trace_dir`: If specified, saves the profiling trace to the given dir in JSON format. - `--cpu`: If passed, trains on the CPU instead of GPU. These arguments should be added at the end of any method for starting the Python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./profiler.py --record_shapes --profile_memory --with_flops --output_trace_dir "profiler" ```
accelerate/examples/by_feature/README.md/0
{ "file_path": "accelerate/examples/by_feature/README.md", "repo_id": "accelerate", "token_count": 1692 }
distributed_type: FSDP fsdp_config: fsdp_activation_checkpointing: false fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_use_orig_params: true
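# Illustrative usage sketch (not part of the original config): assuming this fragment is saved
# as fsdp_config.yaml and completed with the usual top-level accelerate fields (e.g.
# num_processes, mixed_precision), it could be passed to the launcher with, for example:
#   accelerate launch --config_file fsdp_config.yaml train.py
# where `train.py` is a hypothetical training script.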
accelerate/examples/slurm/fsdp_config.yaml/0
{ "file_path": "accelerate/examples/slurm/fsdp_config.yaml", "repo_id": "accelerate", "token_count": 167 }
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_boto3_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_boto3_available(): import boto3 # noqa: F401 def _create_iam_role_for_sagemaker(role_name): iam_client = boto3.client("iam") sagemaker_trust_policy = { "Version": "2012-10-17", "Statement": [ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) ) policy_document = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:*", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:GetAuthorizationToken", "cloudwatch:PutMetricData", "cloudwatch:GetMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents", "logs:GetLogEvents", "s3:CreateBucket", "s3:ListBucket", "s3:GetBucketLocation", "s3:GetObject", "s3:PutObject", ], "Resource": "*", } ], } # attach policy to role iam_client.put_role_policy( RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"role {role_name} already exists. 
Using existing one") def _get_iam_role_arn(role_name): iam_client = boto3.client("iam") return iam_client.get_role(RoleName=role_name)["Role"]["Arn"] def get_sagemaker_input(): credentials_configuration = _ask_options( "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int, ) aws_profile = None if credentials_configuration == 0: aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default") os.environ["AWS_PROFILE"] = aws_profile else: print( "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" ) aws_access_key_id = _ask_field("AWS Access Key ID: ") os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id aws_secret_access_key = _ask_field("AWS Secret Access Key: ") os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1") os.environ["AWS_DEFAULT_REGION"] = aws_region role_management = _ask_options( "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int, ) if role_management == 0: iam_role_name = _ask_field("Enter your IAM role name: ") else: iam_role_name = "accelerate_sagemaker_execution_role" print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials') _create_iam_role_for_sagemaker(iam_role_name) is_custom_docker_image = _ask_field( "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) docker_image = None if is_custom_docker_image: docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower()) is_sagemaker_inputs_enabled = _ask_field( "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) sagemaker_inputs_file = None if is_sagemaker_inputs_enabled: sagemaker_inputs_file = _ask_field( "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower(), ) is_sagemaker_metrics_enabled = _ask_field( "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) sagemaker_metrics_file = None if is_sagemaker_metrics_enabled: sagemaker_metrics_file = _ask_field( "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower(), ) distributed_type = _ask_options( "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, ) dynamo_config = {} use_dynamo = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_dynamo: prefix = "dynamo_" dynamo_config[prefix + "backend"] = _ask_options( "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) use_custom_options = _ask_field( "Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_custom_options: dynamo_config[prefix + "mode"] = _ask_options( "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default", ) dynamo_config[prefix + "use_fullgraph"] = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config[prefix + "use_dynamic"] = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) ec2_instance_query = "Which EC2 instance type you want to use for your training?" if distributed_type != SageMakerDistributedType.NO: ec2_instance_type = _ask_options( ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] ) else: ec2_instance_query += "? [ml.p3.2xlarge]:" ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge") debug = False if distributed_type != SageMakerDistributedType.NO: debug = _ask_field( "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) num_machines = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): num_machines = _ask_field( "How many machines do you want use? [1]: ", int, default=1, ) mixed_precision = _ask_options( "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." ) return SageMakerConfig( image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, debug=debug, )
accelerate/src/accelerate/commands/config/sagemaker.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/sagemaker.py", "repo_id": "accelerate", "token_count": 4784 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from typing import Dict, List, Mapping, Optional, Union import torch import torch.nn as nn from .state import PartialState from .utils import ( PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device, ) from .utils.imports import ( is_mlu_available, is_musa_available, is_npu_available, is_xpu_available, ) from .utils.memory import clear_device_cache from .utils.modeling import get_non_persistent_buffers from .utils.other import recursive_getattr _accelerate_added_attributes = ["to", "cuda", "npu", "xpu", "mlu", "musa"] class ModelHook: """ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference with PyTorch existing hooks is that they get passed along the kwargs. Class attribute: - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under the `torch.no_grad()` context manager. """ no_grad = False def init_hook(self, module): """ To be executed when the hook is attached to the module. Args: module (`torch.nn.Module`): The module attached to this hook. """ return module def pre_forward(self, module, *args, **kwargs): """ To be executed just before the forward method of the model. Args: module (`torch.nn.Module`): The module whose forward pass will be executed just after this event. args (`Tuple[Any]`): The positional arguments passed to the module. kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module. Returns: `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`. """ return args, kwargs def post_forward(self, module, output): """ To be executed just after the forward method of the model. Args: module (`torch.nn.Module`): The module whose forward pass been executed just before this event. output (`Any`): The output of the module. Returns: `Any`: The processed `output`. """ return output def detach_hook(self, module): """ To be executed when the hook is detached from a module. Args: module (`torch.nn.Module`): The module detached from this hook. """ return module class SequentialHook(ModelHook): """ A hook that can contain several hooks and iterates through them at each event. """ def __init__(self, *hooks): self.hooks = hooks def init_hook(self, module): for hook in self.hooks: module = hook.init_hook(module) return module def pre_forward(self, module, *args, **kwargs): for hook in self.hooks: args, kwargs = hook.pre_forward(module, *args, **kwargs) return args, kwargs def post_forward(self, module, output): for hook in self.hooks: output = hook.post_forward(module, output) return output def detach_hook(self, module): for hook in self.hooks: module = hook.detach_hook(module) return module def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): """ Adds a hook to a given module. 
This will rewrite the `forward` method of the module to include the hook, to remove this behavior and restore the original `forward` method, use `remove_hook_from_module`. <Tip warning={true}> If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. </Tip> Args: module (`torch.nn.Module`): The module to attach a hook to. hook (`ModelHook`): The hook to attach. append (`bool`, *optional*, defaults to `False`): Whether the hook should be chained with an existing one (if module already contains a hook) or not. Returns: `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can be discarded). """ if append and (getattr(module, "_hf_hook", None) is not None): old_hook = module._hf_hook remove_hook_from_module(module) hook = SequentialHook(old_hook, hook) if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): # If we already put some hook on this module, we replace it with the new one. old_forward = module._old_forward else: old_forward = module.forward module._old_forward = old_forward module = hook.init_hook(module) module._hf_hook = hook def new_forward(module, *args, **kwargs): args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) if module._hf_hook.no_grad: with torch.no_grad(): output = module._old_forward(*args, **kwargs) else: output = module._old_forward(*args, **kwargs) return module._hf_hook.post_forward(module, output) # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) else: module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) return module def remove_hook_from_module(module: nn.Module, recurse=False): """ Removes any hook attached to a module via `add_hook_to_module`. Args: module (`torch.nn.Module`): The module to attach a hook to. recurse (`bool`, **optional**): Whether to remove the hooks recursively Returns: `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can be discarded). """ if hasattr(module, "_hf_hook"): module._hf_hook.detach_hook(module) delattr(module, "_hf_hook") if hasattr(module, "_old_forward"): # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = module._old_forward else: module.forward = module._old_forward delattr(module, "_old_forward") # Remove accelerate added warning hooks from dispatch_model for attr in _accelerate_added_attributes: module.__dict__.pop(attr, None) if recurse: for child in module.children(): remove_hook_from_module(child, recurse) return module class AlignDevicesHook(ModelHook): """ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the associated module, potentially offloading the weights after the forward pass. Args: execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. 
offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. io_same_device (`bool`, *optional*, defaults to `False`): Whether or not the output should be placed on the same device as the input was. weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. place_submodules (`bool`, *optional*, defaults to `False`): Whether to place the submodules on `execution_device` during the `init_hook` event. """ def __init__( self, execution_device: Optional[Union[int, str, torch.device]] = None, offload: bool = False, io_same_device: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, place_submodules: bool = False, skip_keys: Optional[Union[str, List[str]]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): self.execution_device = execution_device self.offload = offload self.io_same_device = io_same_device self.weights_map = weights_map self.offload_buffers = offload_buffers self.place_submodules = place_submodules self.skip_keys = skip_keys # Will contain the input device when `io_same_device=True`. self.input_device = None self.param_original_devices = {} self.buffer_original_devices = {} self.tied_params_names = set() # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory # for tied weights already loaded on the target execution device. self.tied_params_map = tied_params_map def __repr__(self): return ( f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, " f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, " f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})" ) def init_hook(self, module): # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero. if self.execution_device == "meta" or self.execution_device == torch.device("meta"): self.tied_params_map = None if not self.offload and self.execution_device is not None: for name, _ in named_module_tensors(module, recurse=self.place_submodules): set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map) elif self.offload: self.original_devices = { name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) } if self.weights_map is None: self.weights_map = { name: param.to("cpu") for name, param in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules ) } for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True ): # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer, # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str] # to add on the fly pointers to `tied_params_map` in the pre_forward call. 
if ( self.tied_params_map is not None and recursive_getattr(module, name).data_ptr() in self.tied_params_map ): self.tied_params_names.add(name) set_module_tensor_to_device(module, name, "meta") if not self.offload_buffers and self.execution_device is not None: for name, _ in module.named_buffers(recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) elif self.offload_buffers and self.execution_device is not None: for name in get_non_persistent_buffers(module, recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) return module def pre_forward(self, module, *args, **kwargs): if self.io_same_device: self.input_device = find_device([args, kwargs]) if self.offload: self.tied_pointers_to_remove = set() for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): fp16_statistics = None value = self.weights_map[name] if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys(): if value.dtype == torch.int8: fp16_statistics = self.weights_map[name.replace("weight", "SCB")] # In case we are using offloading with tied weights, we need to keep track of the offloaded weights # that are loaded on device at this point, as we will need to remove them as well from the dictionary # self.tied_params_map in order to allow to free memory. if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map: self.tied_params_map[value.data_ptr()] = {} if ( value is not None and self.tied_params_map is not None and value.data_ptr() in self.tied_params_map and self.execution_device not in self.tied_params_map[value.data_ptr()] ): self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device)) set_module_tensor_to_device( module, name, self.execution_device, value=value, fp16_statistics=fp16_statistics, tied_params_map=self.tied_params_map, ) return send_to_device(args, self.execution_device), send_to_device( kwargs, self.execution_device, skip_keys=self.skip_keys ) def post_forward(self, module, output): if self.offload: for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): set_module_tensor_to_device(module, name, "meta") if type(module).__name__ == "Linear8bitLt": module.state.SCB = None module.state.CxB = None # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from # this dictionary to allow the garbage collector to do its job. 
for value_pointer, device in self.tied_pointers_to_remove: if isinstance(device, int): if is_npu_available(): device = f"npu:{device}" elif is_mlu_available(): device = f"mlu:{device}" elif is_musa_available(): device = f"musa:{device}" elif is_xpu_available(): device = f"xpu:{device}" del self.tied_params_map[value_pointer][device] self.tied_pointers_to_remove = set() if self.io_same_device and self.input_device is not None: output = send_to_device(output, self.input_device, skip_keys=self.skip_keys) return output def detach_hook(self, module): if self.offload: for name, device in self.original_devices.items(): if device != torch.device("meta"): set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) return module def attach_execution_device_hook( module: torch.nn.Module, execution_device: Union[int, str, torch.device], skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right execution device Args: module (`torch.nn.Module`): The module where we want to attach the hooks. execution_device (`int`, `str` or `torch.device`): The device on which inputs and model weights should be placed before the forward pass. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0: add_hook_to_module( module, AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map), ) # Break the recursion if we get to a preload module. if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes: return for child in module.children(): attach_execution_device_hook( child, execution_device, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map, ) def attach_align_device_hook( module: torch.nn.Module, execution_device: Optional[torch.device] = None, offload: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, module_name: str = "", skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or buffers. Args: module (`torch.nn.Module`): The module where we want to attach the hooks. 
execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. module_name (`str`, *optional*, defaults to `""`): The name of the module. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ # Attach the hook on this module if it has any direct tensor. directs = named_module_tensors(module) full_offload = ( offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes ) if len(list(directs)) > 0 or full_offload: if weights_map is not None: prefix = f"{module_name}." if len(module_name) > 0 else "" prefixed_weights_map = PrefixedDataset(weights_map, prefix) else: prefixed_weights_map = None hook = AlignDevicesHook( execution_device=execution_device, offload=offload, weights_map=prefixed_weights_map, offload_buffers=offload_buffers, place_submodules=full_offload, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook, append=True) # We stop the recursion in case we hit the full offload. if full_offload: return # Recurse on all children of the module. for child_name, child in module.named_children(): child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name attach_align_device_hook( child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) def remove_hook_from_submodules(module: nn.Module): """ Recursively removes all hooks attached on the submodules of a given model. Args: module (`torch.nn.Module`): The module on which to remove all hooks. 
""" remove_hook_from_module(module) for child in module.children(): remove_hook_from_submodules(child) def attach_align_device_hook_on_blocks( module: nn.Module, execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None, offload: Union[bool, Dict[str, bool]] = False, weights_map: Mapping = None, offload_buffers: bool = False, module_name: str = "", skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Attaches `AlignDevicesHook` to all blocks of a given model as needed. Args: module (`torch.nn.Module`): The module where we want to attach the hooks. execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*): The device on which inputs and model weights should be placed before the forward pass. It can be one device for the whole module, or a dictionary mapping module name to device. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole module, or a dictionary mapping module name to boolean. weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. module_name (`str`, *optional*, defaults to `""`): The name of the module. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ # If one device and one offload, we've got one hook. 
if not isinstance(execution_device, Mapping) and not isinstance(offload, dict): if not offload: hook = AlignDevicesHook( execution_device=execution_device, io_same_device=True, skip_keys=skip_keys, place_submodules=True, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) else: attach_align_device_hook( module, execution_device=execution_device, offload=True, weights_map=weights_map, offload_buffers=offload_buffers, module_name=module_name, skip_keys=skip_keys, tied_params_map=tied_params_map, ) return if not isinstance(execution_device, Mapping): execution_device = {key: execution_device for key in offload.keys()} if not isinstance(offload, Mapping): offload = {key: offload for key in execution_device.keys()} if module_name in execution_device and module_name in offload and not offload[module_name]: hook = AlignDevicesHook( execution_device=execution_device[module_name], offload_buffers=offload_buffers, io_same_device=(module_name == ""), place_submodules=True, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) attach_execution_device_hook( module, execution_device[module_name], skip_keys=skip_keys, tied_params_map=tied_params_map ) elif module_name in execution_device and module_name in offload: attach_align_device_hook( module, execution_device=execution_device[module_name], offload=True, weights_map=weights_map, offload_buffers=offload_buffers, module_name=module_name, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map, ) if not hasattr(module, "_hf_hook"): hook = AlignDevicesHook( execution_device=execution_device[module_name], io_same_device=(module_name == ""), skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) attach_execution_device_hook( module, execution_device[module_name], preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) elif module_name == "": hook = AlignDevicesHook( execution_device=execution_device.get(""), io_same_device=True, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) for child_name, child in module.named_children(): child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name attach_align_device_hook_on_blocks( child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) class CpuOffload(ModelHook): """ Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after the forward, the user needs to call the `init_hook` method again for this. Args: execution_device(`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If passed, its offload method will be called just before the forward of the model to which this hook is attached. 
""" def __init__( self, execution_device: Optional[Union[str, int, torch.device]] = None, prev_module_hook: Optional["UserCpuOffloadHook"] = None, ): self.prev_module_hook = prev_module_hook self.execution_device = execution_device if execution_device is not None else PartialState().default_device def init_hook(self, module): return module.to("cpu") def pre_forward(self, module, *args, **kwargs): if self.prev_module_hook is not None: self.prev_module_hook.offload() clear_device_cache() module.to(self.execution_device) return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device) class UserCpuOffloadHook: """ A simple hook grouping a model and a `ModelHook`, which provides easy APIs for to call the init method of the hook or remove it entirely. """ def __init__(self, model, hook): self.model = model self.hook = hook def offload(self): self.hook.init_hook(self.model) def remove(self): remove_hook_from_module(self.model)
accelerate/src/accelerate/hooks.py/0
{ "file_path": "accelerate/src/accelerate/hooks.py", "repo_id": "accelerate", "token_count": 13482 }
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.cuda.memory_allocated()
        elif is_mlu_available():
            torch.mlu.empty_cache()
            torch.mlu.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.mlu.memory_allocated()
        elif is_musa_available():
            torch.musa.empty_cache()
            torch.musa.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.musa.memory_allocated()
        elif is_npu_available():
            torch.npu.empty_cache()
            torch.npu.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.npu.memory_allocated()
        elif is_xpu_available():
            torch.xpu.empty_cache()
            torch.xpu.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.xpu.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            self.end = torch.cuda.memory_allocated()
            self.peak = torch.cuda.max_memory_allocated()
        elif is_mlu_available():
            torch.mlu.empty_cache()
            self.end = torch.mlu.memory_allocated()
            self.peak = torch.mlu.max_memory_allocated()
        elif is_musa_available():
            torch.musa.empty_cache()
            self.end = torch.musa.memory_allocated()
            self.peak = torch.musa.max_memory_allocated()
        elif is_npu_available():
            torch.npu.empty_cache()
            self.end = torch.npu.memory_allocated()
            self.peak = torch.npu.max_memory_allocated()
        elif is_xpu_available():
            torch.xpu.empty_cache()
            self.end = torch.xpu.memory_allocated()
            self.peak = torch.xpu.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """
    Creates a set of `DataLoader`s for the `glue` dataset.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
        model_name (`str`, *optional*):
            The name of the model to use.
        n_train (`int`, *optional*):
            The number of training examples to use.
        n_val (`int`, *optional*):
            The number of validation examples to use.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.XLA:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
        accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
        accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
        accelerator.print(
            f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
    accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py", "repo_id": "accelerate", "token_count": 4676 }
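The script above is normally started with `accelerate launch`, e.g. `accelerate launch test_peak_memory_usage.py --peak_memory_upper_bound 1500` (the flag names come from its argument parser; the bound value is just an example). The reusable piece is the `TorchTracemalloc` bookkeeping; as a hedged, standalone sketch of mine (not repository code), its CUDA branch boils down to:

import gc

import torch


def peak_memory_of(fn):
    """Run `fn` once and return (used_mb, peaked_mb), mirroring what TorchTracemalloc records."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()  # zero the peak gauge before measuring
    begin = torch.cuda.memory_allocated()
    fn()
    end = torch.cuda.memory_allocated()
    peak = torch.cuda.max_memory_allocated()
    return int((end - begin) / 2**20), int((peak - begin) / 2**20)


if __name__ == "__main__" and torch.cuda.is_available():
    model = torch.nn.Linear(1024, 1024).cuda()
    data = torch.randn(64, 1024, device="cuda")
    used, peaked = peak_memory_of(lambda: model(data).sum().backward())
    print(f"used: {used} MB, peaked: {peaked} MB")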
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from copy import deepcopy from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_4bit_bnb_available, is_8bit_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) logger = logging.getLogger(__name__) def load_and_quantize_model( model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False, ): """ This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the model is already loaded, we will quantize the model and put the model on the GPU, Args: model (`torch.nn.Module`): Input model. The model can be already loaded or on the meta device bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters weights_location (`str` or `os.PathLike`): The folder weights_location to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. 
Returns: `torch.nn.Module`: The quantized model """ load_in_4bit = bnb_quantization_config.load_in_4bit load_in_8bit = bnb_quantization_config.load_in_8bit if load_in_8bit and not is_8bit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_4bit and not is_4bit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) modules_on_cpu = [] # custom device map if isinstance(device_map, dict) and len(device_map.keys()) > 1: modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: bnb_quantization_config.skip_modules = get_keys_to_not_convert(model) # add cpu modules to skip modules only for 4-bit modules if load_in_4bit: bnb_quantization_config.skip_modules.extend(modules_on_cpu) modules_to_not_convert = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fp32_modules is None: bnb_quantization_config.keep_in_fp32_modules = [] keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules modules_to_not_convert.extend(keep_in_fp32_modules) # compatibility with peft model.is_loaded_in_4bit = load_in_4bit model.is_loaded_in_8bit = load_in_8bit model_device = get_parameter_device(model) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert) # convert param to the right dtype dtype = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules): param.to(torch.float32) if param.dtype != torch.float32: name = name.replace(".weight", "").replace(".bias", "") param = getattr(model, name, None) if param is not None: param.to(torch.float32) elif torch.is_floating_point(param): param.to(dtype) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device()) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device()) elif torch.xpu.is_available(): model.to(torch.xpu.current_device()) else: raise RuntimeError("No GPU found. A GPU is needed for quantization.") logger.info( f"The model device type is {model_device.type}. However, gpu is needed for quantization." "We move the model to gpu." 
) return model elif weights_location is None: raise RuntimeError( f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " ) else: with init_empty_weights(): model = replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert ) device_map = get_quantized_model_device_map( model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): offload_state_dict = True offload = any(x in list(device_map.values()) for x in ["cpu", "disk"]) load_checkpoint_in_model( model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, ) return dispatch_model(model, device_map=device_map, offload_dir=offload_folder) def get_quantized_model_device_map( model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None ): if device_map is None: if torch.cuda.is_available(): device_map = {"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization.") logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.") if isinstance(device_map, str): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) special_dtypes = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules) } ) special_dtypes.update( { name: torch.float32 for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules) } ) kwargs = {} kwargs["special_dtypes"] = special_dtypes kwargs["no_split_module_classes"] = no_split_module_classes kwargs["dtype"] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": max_memory = get_balanced_memory( model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs, ) kwargs["max_memory"] = max_memory device_map = infer_auto_device_map(model, **kwargs) if isinstance(device_map, dict): # check if don't have any quantized module on the cpu modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules device_map_without_some_modules = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_4bit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None): """ A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit` modules from the `bitsandbytes`library. The function will be run recursively and replace `torch.nn.Linear` modules. Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[str]`): Names of the modules to not quantize convert. In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[str]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert. """ if modules_to_not_convert is None: modules_to_not_convert = [] model, has_been_replaced = _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert, current_key_name ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily import bitsandbytes as bnb has_been_replaced = False for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) proceed = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: proceed = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_8bit: bnb_module = bnb.nn.Linear8bitLt( module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, ) elif bnb_quantization_config.load_in_4bit: bnb_module = bnb.nn.Linear4bit( module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False") bnb_module.weight.data = module.weight.data if module.bias is not None: bnb_module.bias.data = module.bias.data bnb_module.requires_grad_(False) setattr(model, name, bnb_module) has_been_replaced = True if len(list(module.children())) > 0: _, _has_been_replaced = _replace_with_bnb_layers( module, bnb_quantization_config, modules_to_not_convert, current_key_name ) has_been_replaced = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def get_keys_to_not_convert(model): r""" An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model """ # Create a copy of the model with init_empty_weights(): tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_params = find_tied_parameters(tied_model) # For compatibility with Accelerate < 0.18 if isinstance(tied_params, dict): tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) else: tied_keys = sum(tied_params, []) has_tied_params = len(tied_keys) > 0 # Check if it is a base model is_base_model = False if hasattr(model, "base_model_prefix"): is_base_model = not hasattr(model, model.base_model_prefix) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head list_modules = list(model.named_children()) list_last_module = [list_modules[-1][0]] # add last module together with tied weights intersection = set(list_last_module) - set(tied_keys) list_untouched = list(set(tied_keys)) + list(intersection) # remove ".weight" from the keys names_to_remove = [".weight", ".bias"] filtered_module_names = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: name = name.replace(name_to_remove, "") filtered_module_names.append(name) return filtered_module_names def has_4bit_bnb_layers(model): """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model""" # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily import bitsandbytes as bnb for m in model.modules(): if isinstance(m, bnb.nn.Linear4bit): return True return False def get_parameter_device(parameter: nn.Module): return next(parameter.parameters()).device def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fp16_statistics is None: set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param) tensor_name = param_name module = model if "." in tensor_name: splits = tensor_name.split(".") for split in splits[:-1]: new_module = getattr(module, split) if new_module is None: raise ValueError(f"{module} has no attribute {split}.") module = new_module tensor_name = splits[-1] # offload weights module._parameters[tensor_name].requires_grad = False offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index) if hasattr(module._parameters[tensor_name], "SCB"): offload_weight( module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index, ) else: offload_weight(param, param_name, offload_folder, index=offload_index) offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index) set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
accelerate/src/accelerate/utils/bnb.py/0
{ "file_path": "accelerate/src/accelerate/utils/bnb.py", "repo_id": "accelerate", "token_count": 8809 }
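For orientation, the call pattern `load_and_quantize_model` expects looks roughly like the sketch below. This is a hedged example of mine, not repository code: the model id "facebook/opt-350m" and the local `weights_location` path are placeholders, and only arguments that appear in the signatures and docstrings above are used.

import torch
from transformers import AutoConfig, AutoModelForCausalLM

from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

# Build the model skeleton on the meta device so no full-precision weights are materialized.
config = AutoConfig.from_pretrained("facebook/opt-350m")
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)

# weights_location should point at a checkpoint file or folder, as described in the docstring above.
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/opt-350m-checkpoint",
    device_map="auto",
)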
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib.metadata
import subprocess
import sys


def install_xla(upgrade: bool = False):
    """
    Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory.

    Args:
        upgrade (`bool`, *optional*, defaults to `False`):
            Whether to upgrade `torch` and install the latest `torch_xla` wheels.

    Example:

    ```python
    >>> from accelerate.utils import install_xla

    >>> install_xla(upgrade=True)
    ```
    """
    in_colab = False
    if "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    if in_colab:
        if upgrade:
            torch_install_cmd = ["pip", "install", "-U", "torch"]
            subprocess.run(torch_install_cmd, check=True)
        # get the current version of torch
        torch_version = importlib.metadata.version("torch")
        torch_version_trunc = torch_version[: torch_version.rindex(".")]
        xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl"
        xla_install_cmd = ["pip", "install", xla_wheel]
        subprocess.run(xla_install_cmd, check=True)
    else:
        raise RuntimeError("`install_xla` utility works only on google colab.")
accelerate/src/accelerate/utils/torch_xla.py/0
{ "file_path": "accelerate/src/accelerate/utils/torch_xla.py", "repo_id": "accelerate", "token_count": 691 }
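The only subtle step in `install_xla` is how the wheel name is derived from the installed `torch` version; a small standalone illustration of that truncation (mine, not part of the utility):

import importlib.metadata

# install_xla keeps everything before the last ".", e.g. "2.1.0" -> "2.1",
# and interpolates it into the Colab wheel URL shown above.
torch_version = importlib.metadata.version("torch")
torch_version_trunc = torch_version[: torch_version.rindex(".")]
print(f"torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl")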
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_CPU
downcast_bf16: 'no'
ipex_config:
  ipex: true
machine_rank: 0
main_process_ip: 127.0.0.1
main_process_port: 29500
main_training_function: main
mixed_precision: 'no'
mpirun_config:
  mpirun_ccl: '1'
  mpirun_hostfile: /home/user/hostfile
num_machines: 4
num_processes: 16
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: true
accelerate/tests/test_configs/0_28_0_mpi.yaml/0
{ "file_path": "accelerate/tests/test_configs/0_28_0_mpi.yaml", "repo_id": "accelerate", "token_count": 193 }
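This config is consumed by the launcher, e.g. `accelerate launch --config_file 0_28_0_mpi.yaml your_script.py`. A minimal probe script (hypothetical, not part of the test suite) to confirm what each launched process sees under this MULTI_CPU/MPI setup:

from accelerate import Accelerator

accelerator = Accelerator()
# With the YAML above, these should report MULTI_CPU and 16 processes spread over 4 machines.
accelerator.print(f"distributed_type: {accelerator.distributed_type}")
accelerator.print(f"num_processes:    {accelerator.num_processes}")
print(f"process_index={accelerator.process_index}, device={accelerator.device}")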
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import warnings from collections import OrderedDict from typing import Dict, Optional import torch import torch.nn as nn from parameterized import parameterized from safetensors.torch import save_file from accelerate import init_empty_weights from accelerate.big_modeling import cpu_offload from accelerate.test_utils import ( require_huggingface_suite, require_multi_device, require_non_cpu, torch_device, ) from accelerate.utils.modeling import ( align_module_device, check_device_map, clean_device_map, compute_module_sizes, compute_module_total_buffer_size, convert_file_size_to_int, find_tied_parameters, get_balanced_memory, get_module_size_with_ties, get_state_dict_offloaded_model, infer_auto_device_map, load_checkpoint_in_model, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, ) torch_device = f"{torch_device}:0" if torch_device != "cpu" else "cpu" class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class NestedModelForTest(nn.Module): def __init__(self): super().__init__() self.model = ModelForTest() def forward(self, x): return self.model(x) class LinearWithNonPersistentBuffers(nn.Module): def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.in_features = in_features self.out_features = out_features self.register_buffer("weight", torch.empty((out_features, in_features), **factory_kwargs)) if bias: self.register_buffer("bias", torch.empty(out_features, **factory_kwargs), persistent=False) else: self.register_buffer("bias", None) def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.nn.functional.linear(input, self.weight, self.bias) class ModelSeveralDtypes(nn.Module): def __init__(self): super().__init__() self.register_buffer("int_param", torch.randint(high=10, size=(15, 30))) self.register_parameter("float_param", torch.nn.Parameter(torch.rand(10, 5))) def forward(self, x): return x + 2 def sequential_model(num_layers): layers = OrderedDict([(f"linear{i}", nn.Linear(1000, 1000)) for i in range(1, num_layers + 1)]) return nn.Sequential(layers) class ModelingUtilsTester(unittest.TestCase): def check_set_module_tensor_for_device(self, model, device1, device2): assert model.linear1.weight.device == torch.device(device1) with self.subTest("Access by submodule and direct name for a parameter"): set_module_tensor_to_device(model.linear1, "weight", device2) assert model.linear1.weight.device == torch.device(device2) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 
set_module_tensor_to_device(model.linear1, "weight", device1) set_module_tensor_to_device(model.linear1, "weight", device1, value=torch.randn(4, 3)) else: set_module_tensor_to_device(model.linear1, "weight", device1) assert model.linear1.weight.device == torch.device(device1) with self.subTest("Access by module and full name for a parameter"): set_module_tensor_to_device(model, "linear1.weight", device2) assert model.linear1.weight.device == torch.device(device2) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model, "linear1.weight", device1) set_module_tensor_to_device(model, "linear1.weight", device1, value=torch.randn(4, 3)) else: set_module_tensor_to_device(model, "linear1.weight", device1) assert model.linear1.weight.device == torch.device(device1) assert model.batchnorm.running_mean.device == torch.device(device1) with self.subTest("Access by submodule and direct name for a buffer"): set_module_tensor_to_device(model.batchnorm, "running_mean", device2) assert model.batchnorm.running_mean.device == torch.device(device2) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model.batchnorm, "running_mean", device1) set_module_tensor_to_device(model.batchnorm, "running_mean", device1, value=torch.randn(4)) else: set_module_tensor_to_device(model.batchnorm, "running_mean", device1) assert model.batchnorm.running_mean.device == torch.device(device1) with self.subTest("Access by module and full name for a parameter"): set_module_tensor_to_device(model, "batchnorm.running_mean", device2) assert model.batchnorm.running_mean.device == torch.device(device2) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on CPU set_module_tensor_to_device(model, "batchnorm.running_mean", device1) set_module_tensor_to_device(model, "batchnorm.running_mean", device1, value=torch.randn(4)) else: set_module_tensor_to_device(model, "batchnorm.running_mean", device1) assert model.batchnorm.running_mean.device == torch.device(device1) def test_set_module_tensor_to_meta_and_cpu(self): model = ModelForTest() self.check_set_module_tensor_for_device(model, "cpu", "meta") @require_non_cpu def test_set_module_tensor_to_cpu_and_gpu(self): model = ModelForTest() self.check_set_module_tensor_for_device(model, "cpu", torch_device) @require_non_cpu def test_set_module_tensor_to_meta_and_gpu(self): model = ModelForTest().to(torch_device) self.check_set_module_tensor_for_device(model, torch_device, "meta") @require_multi_device def test_set_module_tensor_between_gpus(self): model = ModelForTest().to(torch_device) self.check_set_module_tensor_for_device(model, torch_device, torch_device.replace("0", "1")) def test_set_module_tensor_sets_dtype(self): model = ModelForTest() set_module_tensor_to_device(model, "linear1.weight", "cpu", value=model.linear1.weight, dtype=torch.float16) assert model.linear1.weight.dtype == torch.float16 def test_set_module_tensor_checks_shape(self): model = ModelForTest() tensor = torch.zeros((2, 2)) with self.assertRaises(ValueError) as cm: set_module_tensor_to_device(model, "linear1.weight", "cpu", value=tensor) assert ( str(cm.exception) == 'Trying to set a tensor of shape torch.Size([2, 2]) in "weight" (which has shape torch.Size([4, 3])), this looks incorrect.' 
) def test_named_tensors(self): model = nn.BatchNorm1d(4) named_tensors = named_module_tensors(model) assert [name for name, _ in named_tensors] == [ "weight", "bias", "running_mean", "running_var", "num_batches_tracked", ] named_tensors = named_module_tensors(model, include_buffers=False) assert [name for name, _ in named_tensors] == ["weight", "bias"] model = ModelForTest() named_tensors = named_module_tensors(model) assert [name for name, _ in named_tensors] == [] named_tensors = named_module_tensors(model, recurse=True) assert [name for name, _ in named_tensors] == [ "linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias", "batchnorm.running_mean", "batchnorm.running_var", "batchnorm.num_batches_tracked", ] named_tensors = named_module_tensors(model, include_buffers=False, recurse=True) assert [name for name, _ in named_tensors] == [ "linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias", ] model = LinearWithNonPersistentBuffers(10, 10) named_tensors = named_module_tensors(model, include_buffers=True, remove_non_persistent=False) assert [name for name, _ in named_tensors] == ["weight", "bias"] named_tensors = named_module_tensors(model, include_buffers=True, remove_non_persistent=True) assert [name for name, _ in named_tensors] == ["weight"] def test_find_tied_parameters(self): model = sequential_model(4) assert find_tied_parameters(model) == [] model.linear2.weight = model.linear1.weight assert find_tied_parameters(model) == [["linear1.weight", "linear2.weight"]] model.linear4.weight = model.linear1.weight assert find_tied_parameters(model) == [["linear1.weight", "linear2.weight", "linear4.weight"]] model = sequential_model(5) model.linear1.weight = model.linear4.weight model.linear2.weight = model.linear3.weight model.linear5.weight = model.linear2.weight tied_params = sorted(find_tied_parameters(model), key=lambda x: len(x)) assert tied_params == [ ["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"], ] model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))])) model.block1.linear1.weight = model.block2.linear1.weight assert find_tied_parameters(model) == [["block1.linear1.weight", "block2.linear1.weight"]] layer = nn.Linear(10, 10) model = nn.Sequential(layer, layer) tied_params = find_tied_parameters(model) assert sorted(tied_params) == [["0.bias", "1.bias"], ["0.weight", "1.weight"]] def test_retie_parameters(self): model = sequential_model(2) retie_parameters(model, [["linear1.weight", "linear2.weight"]]) assert model.linear1.weight is model.linear2.weight model = sequential_model(3) retie_parameters(model, [["linear1.weight", "linear2.weight", "linear3.weight"]]) assert model.linear1.weight is model.linear2.weight assert model.linear1.weight is model.linear3.weight model = sequential_model(5) retie_parameters( model, [["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"]] ) assert model.linear1.weight is model.linear4.weight assert model.linear2.weight is model.linear3.weight assert model.linear2.weight is model.linear5.weight model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))])) retie_parameters(model, [["block1.linear1.weight", "block2.linear1.weight"]]) assert model.block1.linear1.weight is model.block2.linear1.weight def test_compute_module_sizes(self): model = ModelForTest() expected_sizes = {"": 236, 
"linear1": 64, "linear1.weight": 48, "linear1.bias": 16} expected_sizes.update({"linear2": 100, "linear2.weight": 80, "linear2.bias": 20}) expected_sizes.update({"batchnorm": 72, "batchnorm.weight": 16, "batchnorm.bias": 16}) expected_sizes.update( {"batchnorm.running_mean": 16, "batchnorm.running_var": 16, "batchnorm.num_batches_tracked": 8} ) module_sizes = compute_module_sizes(model) assert module_sizes == expected_sizes model.half() expected_sizes = {k: s // 2 for k, s in expected_sizes.items()} # This one is not converted to half. expected_sizes["batchnorm.num_batches_tracked"] = 8 # This impacts batchnorm and total expected_sizes["batchnorm"] += 4 expected_sizes[""] += 4 module_sizes = compute_module_sizes(model) assert module_sizes == expected_sizes def test_compute_module_total_buffer_size(self): model = ModelForTest() model.linear1.register_buffer("test_buffer", torch.zeros(10, 10)) model.register_buffer("test_buffer2", torch.zeros(20, 10)) buffer_size = compute_module_total_buffer_size(model) assert buffer_size == 1240 model.half() buffer_size = compute_module_total_buffer_size(model) assert buffer_size == 624 def test_check_device_map(self): model = ModelForTest() check_device_map(model, {"": 0}) with self.assertRaises(ValueError): check_device_map(model, {"linear1": 0, "linear2": 1}) check_device_map(model, {"linear1": 0, "linear2": 1, "batchnorm": 1}) def shard_test_model(self, model, tmp_dir): module_index = { "linear1": "checkpoint_part1.bin", "batchnorm": "checkpoint_part2.bin", "linear2": "checkpoint_part3.bin", } index = {} for name, _ in model.state_dict().items(): module = name.split(".")[0] index[name] = module_index[module] with open(os.path.join(tmp_dir, "weight_map.index.json"), "w") as f: json.dump(index, f) for module, fname in module_index.items(): state_dict = {k: v for k, v in model.state_dict().items() if k.startswith(module)} full_fname = os.path.join(tmp_dir, fname) torch.save(state_dict, full_fname) def test_load_checkpoint_in_model(self): # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname) # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file) # Check with sharded checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir) @require_non_cpu def test_load_checkpoint_in_model_one_gpu(self): device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map) assert model.linear1.weight.device == torch.device(torch_device) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file, device_map=device_map) assert model.linear1.weight.device == torch.device(torch_device) assert 
model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Check with sharded checkpoint folder model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir, device_map=device_map) assert model.linear1.weight.device == torch.device(torch_device) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") @require_non_cpu def test_load_checkpoint_in_model_disk_offload(self): device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"} model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("meta") # Buffers are not offloaded by default assert model.batchnorm.running_mean.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") assert model.linear2.weight.device == torch.device("cpu") @require_multi_device def test_load_checkpoint_in_model_two_gpu(self): device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 1} # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map) assert model.linear1.weight.device == torch.device(torch_device) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device(torch_device.replace("0", "1")) # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file, device_map=device_map) assert model.linear1.weight.device == torch.device(torch_device) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device(torch_device.replace("0", "1")) # Check with sharded checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir, device_map=device_map) assert model.linear1.weight.device == torch.device(torch_device) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device(torch_device.replace("0", "1")) def test_load_checkpoint_in_model_dtype(self): with tempfile.NamedTemporaryFile(suffix=".pt") as tmpfile: model = ModelSeveralDtypes() torch.save(model.state_dict(), tmpfile.name) new_model = ModelSeveralDtypes() load_checkpoint_in_model( new_model, tmpfile.name, offload_state_dict=True, dtype=torch.float16, device_map={"": "cpu"} ) assert new_model.int_param.dtype == torch.int64 assert 
new_model.float_param.dtype == torch.float16 @parameterized.expand([(None,), ({"": "cpu"},)]) def test_load_checkpoint_in_model_unexpected_keys(self, device_map: Optional[Dict]): model = ModelForTest() state_dict = model.state_dict() state_dict["foo"] = torch.rand(4, 5) with tempfile.NamedTemporaryFile(suffix=".pt") as tmpfile: torch.save(state_dict, tmpfile) model = ModelForTest() with self.assertLogs() as cm: load_checkpoint_in_model(model, tmpfile.name, device_map=device_map) self.assertTrue(any("were not used when" in out for out in cm.output)) with self.assertRaises((ValueError, RuntimeError)): load_checkpoint_in_model(model, tmpfile.name, device_map=device_map, strict=True) def test_clean_device_map(self): # Regroup everything if all is on the same device assert clean_device_map({"a": 0, "b": 0, "c": 0}) == {"": 0} # Regroups children of level 1 on the same device assert clean_device_map({"a.x": 0, "a.y": 0, "b.x": 1, "b.y": 1, "c": 1}) == {"a": 0, "b": 1, "c": 1} # Regroups children of level 2 on the same device assert clean_device_map({"a.x": 0, "a.y": 0, "b.x.0": 1, "b.x.1": 1, "b.y.0": 2, "b.y.1": 2, "c": 2}) == { "a": 0, "b.x": 1, "b.y": 2, "c": 2, } def test_infer_auto_device_map(self): model = ModelForTest() # model has size 236: linear1 64, batchnorm 72, linear2 100 try: with self.assertLogs() as cm: device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) self.assertFalse(any("insufficient memory" in out for out in cm.output)) except AssertionError: # No logs exist; test passes implicitly pass # only linear1 fits on device 0 as we keep memory available for the maximum layer in case of offload assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": 1} device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 172, 2: 200}) # On device 1, we don't care about keeping size available for the max layer, so even if there is just the # size available for batchnorm + linear2, they fit here. 
assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": 1} model.linear1.weight = model.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) # By tying weights, the whole model fits on device 0 assert device_map == {"": 0} # When splitting a bigger model, the split is done at the layer level model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) device_map = infer_auto_device_map(model, max_memory={0: 500, 1: 500}) assert device_map == {"0": 0, "1.linear1": 0, "1.batchnorm": 0, "1.linear2": 1, "2": 1} # With no_split_module_classes, it's done at that module level model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) device_map = infer_auto_device_map( model, max_memory={0: 500, 1: 500}, no_split_module_classes=["ModelForTest"] ) assert device_map == {"0": 0, "1": 1, "2": 1} def test_infer_auto_device_map_with_tied_weights(self): model = nn.Sequential( OrderedDict([("layer1", ModelForTest()), ("layer2", ModelForTest()), ("layer3", ModelForTest())]) ) model.layer3.linear2.weight = model.layer1.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = {"layer1": 0, "layer3.linear2": 0, "layer2": 1, "layer3.linear1": 1, "layer3.batchnorm": 1} assert device_map == expected # With three weights tied together model.layer2.linear2.weight = model.layer1.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = { "layer1": 0, "layer2.linear2": 0, "layer3.linear2": 0, "layer2.linear1": 1, "layer2.batchnorm": 1, "layer3.linear1": 1, "layer3.batchnorm": 1, } assert device_map == expected # With two groups of weights tied together model.layer2.linear1.weight = model.layer1.linear1.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = { "layer1": 0, "layer2.linear1": 0, "layer2.linear2": 0, "layer3.linear2": 0, "layer2.batchnorm": 1, "layer3.linear1": 1, "layer3.batchnorm": 1, } assert device_map == expected # With weights ties in the same module model = nn.Sequential( OrderedDict( [ ("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(6, 6)), ("linear3", nn.Linear(4, 4)), ("linear4", nn.Linear(6, 6)), ] ) ) model.linear3.weight = model.linear1.weight model.linear3.bias = model.linear1.bias device_map = infer_auto_device_map(model, max_memory={0: 250, 1: 400}) expected = {"linear1": 0, "linear2": 1, "linear3": 0, "linear4": 1} assert device_map == expected # With tied weights sharing a same prefix name (`compute.weight` vs `compute.weight_submodule.parameter`) class SubModule(torch.nn.Module): def __init__(self, ref_to_parameter): super().__init__() self.parameter = ref_to_parameter def forward(self, x): return self.x + torch.max(self.parameter) class LinearModuleAndSubModule(torch.nn.Linear): def __init__(self, in_features, out_features): super().__init__(in_features, out_features) self.weight_submodule = SubModule(self.weight) def forward(self, x): return torch.nn.functional.linear(self.weight_submodule(x), self.weight) class Model(torch.nn.Module): def __init__(self): super().__init__() self.compute = LinearModuleAndSubModule(3, 8) def forward(self, x): return self.compute(x) model = Model() device_memory = {0: 4, "cpu": 96000} # Low memory device, just to force splitting and trigger the error infer_auto_device_map(model, device_memory) @require_huggingface_suite def test_infer_auto_device_map_on_t0pp(self): from transformers import AutoConfig, AutoModelForSeq2SeqLM config = AutoConfig.from_pretrained("bigscience/T0pp") 
with init_empty_weights(): model = AutoModelForSeq2SeqLM.from_config(config) model.tie_weights() special_dtypes = {n: torch.float32 for n, _ in model.named_parameters() if "wo" in n} max_memory = {0: 10**10, 1: 10**10, "cpu": 10**10} device_map = infer_auto_device_map( model, no_split_module_classes=["T5Block"], dtype=torch.float16, max_memory=max_memory, special_dtypes=special_dtypes, ) # The 3 tied weights should all be on device 0 assert device_map["shared"] == 0 assert device_map["encoder.embed_tokens"] == 0 assert device_map["decoder.embed_tokens"] == 0 def test_infer_auto_device_map_with_buffer_check(self): model = ModelForTest() model.linear1.register_buffer("test_buffer1", torch.zeros(10, 2)) model.batchnorm.register_buffer("test_buffer2", torch.zeros(10, 3)) model.linear2.register_buffer("test_buffer3", torch.zeros(10, 3)) # model has size 236(parameters) + 360(buffers): linear1 64 + 80, batchnorm 72 + 160, linear2 100 + 120 # Only linear1 (144) fits on device 0, and remaining buffers (batchnorm's 160 + linear2's 120 = 280) won't fit # device 0, because they will also be loaded to device 0 all at once when inferencing without offload_buffers # Should print a warning as intended in such case with self.assertWarns(Warning): device_map = infer_auto_device_map(model, max_memory={0: 400, "cpu": "1GB"}) assert device_map == {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} # Only linear1 (144) fits on device 0, and remaining buffers (batchnorm's 160 + linear2's 120 = 280) won't fit # device 0, but with offload_buffers they won't be loaded to device 0 all at once, so it's ok now # Should NOT print a warning in such case with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") device_map = infer_auto_device_map(model, max_memory={0: 400, "cpu": "1GB"}, offload_buffers=True) assert len(w) == 0 assert device_map == {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} def test_infer_auto_device_map_with_buffer_check_and_multi_devices(self): model = ModelForTest() model.linear1.register_buffer("test_buffer1", torch.zeros(10, 2)) model.batchnorm.register_buffer("test_buffer2", torch.zeros(10, 3)) model.linear2.register_buffer("test_buffer3", torch.zeros(10, 3)) model.linear3 = nn.Linear(4, 5) model.linear3.register_buffer("test_buffer4", torch.zeros(10, 2)) # model has size 336(parameters) + 440(buffers): linear1 64 + 80, batchnorm 72 + 160, linear2 100 + 120, # linear3 100 + 80 # Now we have two devices, linear1 will fit on device 0, batchnorm will fit on device 1, and the second device # can hold all remaining buffers # Should NOT print a warning in such case with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 400, "cpu": "1GB"}) assert len(w) == 0 assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": "cpu", "linear3": "cpu"} # Now we have two devices, but neither the first nor the second device can hold all remaining buffers # Should print a warning as intended in such case with self.assertWarns(Warning): device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 200, "cpu": "1GB"}) assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": "cpu", "linear3": "cpu"} # Now we have two devices, neither can hold all the buffers, but we are using the offload_buffers=True # Should NOT print a warning in such case with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 200, 
"cpu": "1GB"}, offload_buffers=True) assert len(w) == 0 assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": "cpu", "linear3": "cpu"} def test_infer_auto_device_map_with_fallback_allocation(self): # Create a model where modules cannot be allocated without fallback_allocation # Define the inner module with its layers inner_module = nn.Sequential( OrderedDict([("linear1", nn.Linear(10, 4)), ("linear2", nn.Linear(4, 4)), ("linear3", nn.Linear(4, 8))]) ) # Wrap the inner module in another module model = nn.Sequential(OrderedDict([("module", inner_module)])) max_memory = {0: 256} # Without fallback_allocation with self.assertLogs() as cm: device_map = infer_auto_device_map(model, max_memory=max_memory, fallback_allocation=False) # No module should be assigned to device 0 assert all(device != 0 for device in device_map.values()) # Check for warning about insufficient memory self.assertTrue(any("insufficient memory" in out for out in cm.output)) # With fallback_allocation try: with self.assertLogs() as cm: device_map = infer_auto_device_map(model, max_memory=max_memory, fallback_allocation=True) self.assertFalse(any("insufficient memory" in out for out in cm.output)) except AssertionError: # No logs exist; test passes implicitly pass # At least one submodule should be assigned to device 0 assert any(device == 0 for device in device_map.values()) expected_device_map = {"module.linear1": "disk", "module.linear2": 0, "module.linear3": "disk"} assert device_map == expected_device_map def test_infer_auto_device_map_with_fallback_allocation_no_fit(self): # Create a model where even the smallest submodules cannot fit inner_module = nn.Sequential( OrderedDict( [("linear1", nn.Linear(10, 10)), ("linear2", nn.Linear(10, 10)), ("linear3", nn.Linear(10, 10))] ) ) # Wrap the inner module in another module model = nn.Sequential(OrderedDict([("module", inner_module)])) max_memory = {0: 30} # With fallback_allocation try: with self.assertLogs() as cm: device_map = infer_auto_device_map(model, max_memory=max_memory, fallback_allocation=True) # No module should be assigned to device 0 assert all(device != 0 for device in device_map.values()) # Check for warning about insufficient memory self.assertTrue(any("insufficient memory" in out for out in cm.output)) except AssertionError: # No logs exist; test passes implicitly pass def test_infer_auto_device_map_with_fallback_allocation_partial_fit(self): # Create a model with deeper hierarchy class CustomModule(nn.Module): def __init__(self): super().__init__() self.submodule1 = nn.Linear(20, 20) self.submodule2 = nn.Linear(20, 20) model = nn.Sequential( OrderedDict([("module1", CustomModule()), ("module2", CustomModule()), ("module3", CustomModule())]) ) max_memory = {0: 5000} # With fallback_allocation device_map = infer_auto_device_map(model, max_memory=max_memory, fallback_allocation=True) # Check that at least some parameters are assigned to device 0 assigned_to_device_0 = [name for name, device in device_map.items() if device == 0] assert len(assigned_to_device_0) > 0 def test_infer_auto_device_map_with_fallback_allocation_tied_weights(self): # Create a model with tied weights class TiedWeightsModel(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(10, 10) self.linear2 = nn.Linear(10, 10) self.linear2.weight = self.linear1.weight model = TiedWeightsModel() max_memory = {0: 600} # With fallback_allocation device_map = infer_auto_device_map(model, max_memory=max_memory, fallback_allocation=True) # Check that tied modules are 
assigned correctly expected_device_map = {"": 0} assert device_map == expected_device_map def test_infer_auto_device_map_with_fallback_allocation_and_buffers(self): # Create a model with buffers model = nn.Sequential( OrderedDict( [("linear1", nn.Linear(10, 10)), ("batchnorm", nn.BatchNorm1d(10)), ("linear2", nn.Linear(10, 10))] ) ) model.linear1.register_buffer("buffer1", torch.zeros(5)) model.batchnorm.register_buffer("buffer2", torch.zeros(5)) model.linear2.register_buffer("buffer3", torch.zeros(5)) max_memory = {0: 678} # With fallback_allocation and offload_buffers=False with self.assertWarns(Warning) as cm: device_map = infer_auto_device_map( model, max_memory=max_memory, fallback_allocation=True, offload_buffers=False ) # Check that the warning contains the expected message warning_message = str(cm.warning) assert "offload_buffers" in warning_message or "Current model requires" in warning_message # Verify that the entire model is assigned to device 0 expected_device_map = {"batchnorm": 0, "linear1": "disk", "linear2": "disk"} assert device_map == expected_device_map @require_non_cpu def test_get_balanced_memory(self): model = ModelForTest() # model has size 236: linear1 64, batchnorm 72, linear2 100 max_memory = get_balanced_memory(model, max_memory={0: 200, 1: 200}) assert {0: 200, 1: 200} == max_memory # We should be able to set models on a non-contiguous sub-set of max_memory = get_balanced_memory(model, max_memory={0: 200, 2: 200}) assert {0: 200, 2: 200} == max_memory max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 300}) assert {0: 215, 1: 300} == max_memory # Last device always get max memory to give more buffer and avoid accidental CPU offload max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500}) assert {0: 215, 1: 500} == max_memory # Last device always get max memory to give more buffer, even if CPU is provided max_memory = get_balanced_memory(model, max_memory={0: 300, "cpu": 1000}) assert {0: 300, "cpu": 1000} == max_memory # If we set a device to 0, it's not counted. max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300}) assert {0: 0, 1: 215, 2: 300} == max_memory # If we set a device to 0, it's not counted. max_memory = get_balanced_memory(model, max_memory={0: 0, "cpu": 100}) assert {0: 0, "cpu": 100} == max_memory # Tests that get_module_size_with_ties returns the correct tied modules in # models with tied parameters whose parent modules share the same name prefix # See issue #3308: https://github.com/huggingface/accelerate/issues/3308 def test_get_module_size_with_ties(self): # Create a model with a ModuleList containing more than 10 elements # so the names of some layers share the same prefix, e.g. 
"1" and "10" num_layers = 15 model = nn.ModuleList([nn.Linear(10, 10) for _ in range(num_layers)]) # Tie .weight for all the layers for i in range(1, num_layers): model[i].weight = model[i - 1].weight # Each tied parameter group is sorted in alphabetical ordering, # mimicking the output of find_tied_parameters tied_parameters = [sorted([f"{i}.weight" for i in range(num_layers)])] # Compute module sizes weight_size, bias_size = ( model[0].weight.element_size() * model[0].weight.numel(), model[0].bias.element_size() * model[0].bias.numel(), ) module_sizes = dict( **{"": num_layers * (weight_size + bias_size)}, **{f"{i}": (weight_size + bias_size) for i in range(num_layers)}, **{f"{i}.weight": weight_size for i in range(num_layers)}, **{f"{i}.bias": bias_size for i in range(num_layers)}, ) # Simulate the input for get_module_size_with_ties when invoked from infer_auto_device_map # when the first module in model is being processed modules_to_treat = list(model.named_children())[1:] tied_params = tied_parameters[0][1:] module_size = weight_size + bias_size module_size_with_ties, tied_module_names, tied_modules = get_module_size_with_ties( tied_params, module_size, module_sizes, modules_to_treat ) # The expected lists are ordered using as key the module names, to follow # the same order as the tied_parameters returned by find_tied_parameters expected_tied_module_names, expected_tied_modules = map( list, zip(*sorted(modules_to_treat, key=lambda x: x[0])) ) assert module_size_with_ties == module_size + (num_layers - 1) * bias_size assert tied_module_names == expected_tied_module_names assert tied_modules == expected_tied_modules @require_non_cpu def test_load_state_dict(self): state_dict = {k: torch.randn(4, 5) for k in ["a", "b", "c"]} device_maps = [{"a": "cpu", "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": 0}] for device_map in device_maps: with tempfile.TemporaryDirectory() as tmp_dir: checkpoint_file = os.path.join(tmp_dir, "model.safetensors") save_file(state_dict, checkpoint_file, metadata={"format": "pt"}) loaded_state_dict = load_state_dict(checkpoint_file, device_map=device_map) for param, device in device_map.items(): device = device if device != "disk" else "cpu" assert loaded_state_dict[param].device == torch.device(device) def test_convert_file_size(self): result = convert_file_size_to_int("0MB") assert result == 0 result = convert_file_size_to_int("100MB") assert result == (100 * (10**6)) result = convert_file_size_to_int("2GiB") assert result == (2 * (2**30)) result = convert_file_size_to_int("512KiB") assert result == (512 * (2**10)) result = convert_file_size_to_int("1.5GB") assert result == (1.5 * (10**9)) result = convert_file_size_to_int("100KB") assert result == (100 * (10**3)) result = convert_file_size_to_int(500) assert result == 500 with self.assertRaises(ValueError): convert_file_size_to_int("5MBB") with self.assertRaises(ValueError): convert_file_size_to_int("5k0MB") with self.assertRaises(ValueError): convert_file_size_to_int("-1GB") def test_get_state_dict_offloaded_model(self): for model_cls in (ModelForTest, NestedModelForTest): model = model_cls() execution_device = torch.device(torch_device) original_state_dict = model.state_dict() cpu_offload(model, execution_device=execution_device) state_dict = get_state_dict_offloaded_model(model) assert original_state_dict.keys() == state_dict.keys() for key in original_state_dict: assert torch.equal(original_state_dict[key], state_dict[key]) def test_align_module_device_simple(self): model = 
ModelForTest() execution_device = torch.device(torch_device) model_device = torch.device("cpu") # test default execution device with align_module_device(model.batchnorm): assert model.linear1.weight.device == model_device assert model.batchnorm.weight.device == model_device assert model.linear2.weight.device == model_device assert model.linear1.weight.device == model_device assert model.batchnorm.weight.device == model_device assert model.linear2.weight.device == model_device # test with explicit execution device with align_module_device(model.batchnorm, execution_device=execution_device): assert model.linear1.weight.device == model_device assert model.batchnorm.weight.device == execution_device assert model.linear2.weight.device == model_device assert model.linear1.weight.device == model_device assert model.batchnorm.weight.device == model_device assert model.linear2.weight.device == model_device def test_align_module_device_offloaded(self): model = ModelForTest() execution_device = torch.device(torch_device) offload_device = torch.device("meta") cpu_offload(model, execution_device=execution_device) # test default execution device with align_module_device(model.batchnorm): assert model.linear1.weight.device == offload_device assert model.batchnorm.weight.device == execution_device assert model.linear2.weight.device == offload_device assert model.linear1.weight.device == offload_device assert model.batchnorm.weight.device == offload_device assert model.linear2.weight.device == offload_device # test with explicit execution device with align_module_device(model.batchnorm, execution_device="cpu"): assert model.linear1.weight.device == offload_device assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == offload_device assert model.linear1.weight.device == offload_device assert model.batchnorm.weight.device == offload_device assert model.linear2.weight.device == offload_device def test_align_module_device_offloaded_nested(self): model = NestedModelForTest() execution_device = torch.device(torch_device) align_device = torch.device("cpu") cpu_offload(model, execution_device=execution_device) for module in model.modules(): with align_module_device(module, align_device): for param in model.parameters(recurse=False): assert param.device == align_device
accelerate/tests/test_modeling_utils.py/0
{ "file_path": "accelerate/tests/test_modeling_utils.py", "repo_id": "accelerate", "token_count": 20001 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate hf_table_format = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"), padding=1, with_header_hide=None, ) failed = [] group_info = [] no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}} payload = [ { "type": "header", "text": { "type": "plain_text", "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", "emoji": True, }, } ] total_num_failed = 0 for log in Path().glob("*.log"): section_num_failed = 0 with open(log) as f: for line in f: line = json.loads(line) if line.get("nodeid", "") != "": test = line["nodeid"] if line.get("duration", None) is not None: duration = f'{line["duration"]:.4f}' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) failed = [] log.unlink() message = "" all_files2failed = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" failed_table = [] files2failed = {} for test in failed_tests: data = test[0].split("::") data[0] = data[0].split("/")[-1] if data[0] not in files2failed: files2failed[data[0]] = [data[1:]] else: files2failed[data[0]] += [data[1:]] failed_table.append(data) files = [test[0] for test in failed_table] individual_files = list(set(files)) # Count number of instances in failed_tests table = [] for file in individual_files: table.append([file, len(files2failed[file])]) failed_table = tabulate( table, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right", ) message += f"\n```\n{failed_table}\n```" all_files2failed.append(files2failed) if len(message) > 3000: err = "Too many failed tests, please see the full report in the Action results." offset = len(err) + 10 message = message[: 3000 - offset] + f"\n...\n```\n{err}" print(f"### {message}") else: message = "No failed tests! 🤗" print(f"## {message}") payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient client = WebClient(token=os.environ["SLACK_API_TOKEN"]) if message != "No failed tests! 
🤗": md_report = { "type": "section", "text": { "type": "mrkdwn", "text": message, }, } payload.append(md_report) action_button = { "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*", }, "accessory": { "type": "button", "text": { "type": "plain_text", "text": "Check Action results", "emoji": True, }, "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) date_report = { "type": "context", "elements": [ { "type": "plain_text", "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload) ts = response.data["ts"] for failed_file in all_files2failed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name test_class = "" for i, row in enumerate(test_failures): if row[0] != test_class: test_class = row[0] else: test_failures[i][0] = "" payload = { "type": "section", "text": { "type": "mrkdwn", "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="#accelerate-ci-daily", thread_ts=ts, blocks=[payload], )
accelerate/utils/log_reports.py/0
{ "file_path": "accelerate/utils/log_reports.py", "repo_id": "accelerate", "token_count": 3046 }
# Hello world! We will now create the hello world of the ML world, building a model capable of solving MNIST dataset. Open `src/main.rs` and fill in this content: ```rust # extern crate candle_core; use candle_core::{Device, Result, Tensor}; struct Model { first: Tensor, second: Tensor, } impl Model { fn forward(&self, image: &Tensor) -> Result<Tensor> { let x = image.matmul(&self.first)?; let x = x.relu()?; x.matmul(&self.second) } } fn main() -> Result<()> { // Use Device::new_cuda(0)?; to use the GPU. let device = Device::Cpu; let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?; let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?; let model = Model { first, second }; let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?; let digit = model.forward(&dummy_image)?; println!("Digit {digit:?} digit"); Ok(()) } ``` Everything should now run with: ```bash cargo run --release ``` ## Using a `Linear` layer. Now that we have this, we might want to complexify things a bit, for instance by adding `bias` and creating the classical `Linear` layer. We can do as such ```rust # extern crate candle_core; # use candle_core::{Device, Result, Tensor}; struct Linear{ weight: Tensor, bias: Tensor, } impl Linear{ fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = x.matmul(&self.weight)?; x.broadcast_add(&self.bias) } } struct Model { first: Linear, second: Linear, } impl Model { fn forward(&self, image: &Tensor) -> Result<Tensor> { let x = self.first.forward(image)?; let x = x.relu()?; self.second.forward(&x) } } ``` This will change the model running code into a new function ```rust # extern crate candle_core; # use candle_core::{Device, Result, Tensor}; # struct Linear{ # weight: Tensor, # bias: Tensor, # } # impl Linear{ # fn forward(&self, x: &Tensor) -> Result<Tensor> { # let x = x.matmul(&self.weight)?; # x.broadcast_add(&self.bias) # } # } # # struct Model { # first: Linear, # second: Linear, # } # # impl Model { # fn forward(&self, image: &Tensor) -> Result<Tensor> { # let x = self.first.forward(image)?; # let x = x.relu()?; # self.second.forward(&x) # } # } fn main() -> Result<()> { // Use Device::new_cuda(0)?; to use the GPU. // Use Device::Cpu; to use the CPU. let device = Device::cuda_if_available(0)?; // Creating a dummy model let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?; let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?; let first = Linear{weight, bias}; let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?; let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?; let second = Linear{weight, bias}; let model = Model { first, second }; let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?; // Inference on the model let digit = model.forward(&dummy_image)?; println!("Digit {digit:?} digit"); Ok(()) } ``` Now it works, it is a great way to create your own layers. But most of the classical layers are already implemented in [candle-nn](https://github.com/huggingface/candle/tree/main/candle-nn). ## Using `candle_nn`. For instance [Linear](https://github.com/huggingface/candle/blob/main/candle-nn/src/linear.rs) is already there. This Linear is coded with PyTorch layout in mind, to reuse better existing models out there, so it uses the transpose of the weights and not the weights directly. 
So instead we can simplify our example: ```bash cargo add --git https://github.com/huggingface/candle.git candle-nn ``` And rewrite our examples using it ```rust # extern crate candle_core; # extern crate candle_nn; use candle_core::{Device, Result, Tensor}; use candle_nn::{Linear, Module}; struct Model { first: Linear, second: Linear, } impl Model { fn forward(&self, image: &Tensor) -> Result<Tensor> { let x = self.first.forward(image)?; let x = x.relu()?; self.second.forward(&x) } } fn main() -> Result<()> { // Use Device::new_cuda(0)?; to use the GPU. let device = Device::Cpu; // This has changed (784, 100) -> (100, 784) ! let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?; let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?; let first = Linear::new(weight, Some(bias)); let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?; let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?; let second = Linear::new(weight, Some(bias)); let model = Model { first, second }; let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?; let digit = model.forward(&dummy_image)?; println!("Digit {digit:?} digit"); Ok(()) } ``` Feel free to modify this example to use `Conv2d` to create a classical convnet instead. Now that we have the running dummy code we can get to more advanced topics: - [For PyTorch users](../guide/cheatsheet.md) - [Running existing models](../inference/inference.md) - [Training models](../training/training.md)
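As suggested above, the same structure can be turned into a small convnet by swapping the first `Linear` for a `Conv2d`. The sketch below is one possible, untested variant: it assumes `candle_nn`'s `Conv2d::new(weight, bias, Conv2dConfig)` constructor together with the `Module` trait, and the shapes (a single 28x28 grayscale image, 16 random 3x3 filters) are only illustrative.

```rust
# extern crate candle_core;
# extern crate candle_nn;
use candle_core::{Device, Result, Tensor};
use candle_nn::{Conv2d, Conv2dConfig, Linear, Module};

struct ConvModel {
    conv: Conv2d,
    fc: Linear,
}

impl ConvModel {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        // image: (batch, 1, 28, 28)
        let x = self.conv.forward(image)?; // -> (batch, 16, 26, 26) with a 3x3 kernel, no padding
        let x = x.relu()?;
        let x = x.flatten_from(1)?; // -> (batch, 16 * 26 * 26)
        self.fc.forward(&x)
    }
}

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Conv2d weights use the PyTorch layout: (out_channels, in_channels, k_h, k_w).
    let conv_weight = Tensor::randn(0f32, 1.0, (16, 1, 3, 3), &device)?;
    let conv_bias = Tensor::randn(0f32, 1.0, (16,), &device)?;
    let conv = Conv2d::new(conv_weight, Some(conv_bias), Conv2dConfig::default());
    // As with Linear above, the weight is laid out as (out_features, in_features).
    let fc_weight = Tensor::randn(0f32, 1.0, (10, 16 * 26 * 26), &device)?;
    let fc_bias = Tensor::randn(0f32, 1.0, (10,), &device)?;
    let fc = Linear::new(fc_weight, Some(fc_bias));
    let model = ConvModel { conv, fc };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 1, 28, 28), &device)?;
    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```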
candle/candle-book/src/guide/hello_world.md/0
{ "file_path": "candle/candle-book/src/guide/hello_world.md", "repo_id": "candle", "token_count": 2069 }
//! Traits to Define Backend Behavior //! use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Layout, Result, Shape}; pub trait BackendStorage: Sized { type Device: BackendDevice; fn try_clone(&self, _: &Layout) -> Result<Self>; fn dtype(&self) -> DType; fn device(&self) -> &Self::Device; // Maybe this should return a Cow instead so that no copy is done on the cpu case. fn to_cpu_storage(&self) -> Result<CpuStorage>; fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self>; fn powf(&self, _: &Layout, _: f64) -> Result<Self>; fn elu(&self, _: &Layout, _: f64) -> Result<Self>; fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self>; fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self>; fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self>; fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self>; fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self>; fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self>; fn conv1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConv1D, ) -> Result<Self>; fn conv_transpose1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self>; fn conv2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConv2D, ) -> Result<Self>; fn conv_transpose2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self>; fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>; fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>; fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self>; fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self>; fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self>; fn scatter_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self>; fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self>; fn index_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self>; fn matmul( &self, _: &Self, _: (usize, usize, usize, usize), _: &Layout, _: &Layout, ) -> Result<Self>; fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()>; #[allow(clippy::too_many_arguments)] // Similar to cudaMemcpy2D, though values are in elements and not in bytes. fn copy2d( &self, _: &mut Self, _d1: usize, _d2: usize, _src_stride1: usize, _dst_stride1: usize, _src_offset: usize, _dst_offset: usize, ) -> Result<()>; } pub trait BackendDevice: Sized + std::fmt::Debug + Clone { type Storage: BackendStorage; // TODO: Make the usize generic and part of a generic DeviceLocation. fn new(_: usize) -> Result<Self>; fn location(&self) -> crate::DeviceLocation; fn same_device(&self, _: &Self) -> bool; fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; fn ones_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; /// # Safety /// This function is unsafe as it doesn't initialize the underlying data store. /// The caller should ensure that the data is properly initialized as early as possible /// after this call. 
unsafe fn alloc_uninit(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; fn storage_from_slice<T: crate::WithDType>(&self, _: &[T]) -> Result<Self::Storage>; fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage>; fn storage_from_cpu_storage_owned(&self, _: CpuStorage) -> Result<Self::Storage>; fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>; fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>; fn set_seed(&self, _: u64) -> Result<()>; /// Synchronize should block until all the operations on the device are completed. fn synchronize(&self) -> Result<()>; }
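For orientation, the user-facing `Device` and `Tensor` calls eventually bottom out in implementations of these two traits (for instance the CPU backend). The sketch below only uses standard candle-core calls; the trait methods named in the comments are an assumption about the dispatch path, meant to connect the public API to the definitions above rather than to document it precisely.

```rust
use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    // Device::Cpu wraps a BackendDevice implementation; the tensor's data lives
    // in the matching BackendStorage.
    let device = Device::Cpu;
    let a = Tensor::zeros((2, 3), DType::F32, &device)?; // roughly: BackendDevice::zeros_impl
    let b = Tensor::rand(0f32, 1f32, (2, 3), &device)?; // roughly: BackendDevice::rand_uniform
    let c = (a + b)?; // roughly: BackendStorage::binary_impl with the Add op
    println!("{c}");
    Ok(())
}
```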
candle/candle-core/src/backend.rs/0
{ "file_path": "candle/candle-core/src/backend.rs", "repo_id": "candle", "token_count": 2125 }
/// Helper functions to plug cuda kernels in candle. use crate::{Layout, Result, Shape, WithDType}; pub use cudarc; use cudarc::driver::{CudaSlice, DeviceRepr, ValidAsZeroBits}; use super::{CudaDevice, CudaError, WrapErr}; pub type S = super::CudaStorageSlice; pub trait Map1 { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>>; fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> { let out = match s { S::U8(s) => S::U8(self.f(s, d, l)?), S::U32(s) => S::U32(self.f(s, d, l)?), S::I64(s) => S::I64(self.f(s, d, l)?), S::BF16(s) => S::BF16(self.f(s, d, l)?), S::F16(s) => S::F16(self.f(s, d, l)?), S::F32(s) => S::F32(self.f(s, d, l)?), S::F64(s) => S::F64(self.f(s, d, l)?), }; Ok(out) } } pub trait Map2 { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>>; fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> { let out = match (s1, s2) { (S::U8(s1), S::U8(s2)) => S::U8(self.f(s1, l1, s2, l2, d)?), (S::U32(s1), S::U32(s2)) => S::U32(self.f(s1, l1, s2, l2, d)?), (S::I64(s1), S::I64(s2)) => S::I64(self.f(s1, l1, s2, l2, d)?), (S::BF16(s1), S::BF16(s2)) => S::BF16(self.f(s1, l1, s2, l2, d)?), (S::F16(s1), S::F16(s2)) => S::F16(self.f(s1, l1, s2, l2, d)?), (S::F32(s1), S::F32(s2)) => S::F32(self.f(s1, l1, s2, l2, d)?), (S::F64(s1), S::F64(s2)) => S::F64(self.f(s1, l1, s2, l2, d)?), _ => Err(CudaError::InternalError("dtype mismatch in binary op"))?, }; Ok(out) } } pub trait Map3 { #[allow(clippy::too_many_arguments)] fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, src3: &CudaSlice<T>, layout3: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>>; #[allow(clippy::too_many_arguments)] fn map( &self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, s3: &S, l3: &Layout, d: &CudaDevice, ) -> Result<S> { let out = match (s1, s2, s3) { (S::U8(s1), S::U8(s2), S::U8(s3)) => S::U8(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::U32(s1), S::U32(s2), S::U32(s3)) => S::U32(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::I64(s1), S::I64(s2), S::I64(s3)) => S::I64(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::BF16(s1), S::BF16(s2), S::BF16(s3)) => S::BF16(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::F16(s1), S::F16(s2), S::F16(s3)) => S::F16(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::F32(s1), S::F32(s2), S::F32(s3)) => S::F32(self.f(s1, l1, s2, l2, s3, l3, d)?), (S::F64(s1), S::F64(s2), S::F64(s3)) => S::F64(self.f(s1, l1, s2, l2, s3, l3, d)?), _ => Err(CudaError::InternalError("dtype mismatch in ternary op"))?, }; Ok(out) } } pub trait Map2InPlace { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, dst: &mut CudaSlice<T>, dst_shape: &Shape, src: &CudaSlice<T>, src_l: &Layout, dev: &CudaDevice, ) -> Result<()>; fn map( &self, dst: &mut S, dst_s: &Shape, src: &S, src_l: &Layout, d: &CudaDevice, ) -> Result<()> { match (dst, src) { (S::U8(dst), S::U8(src)) => self.f(dst, dst_s, src, src_l, d), (S::U32(dst), S::U32(src)) => self.f(dst, dst_s, src, src_l, d), (S::I64(dst), S::I64(src)) => self.f(dst, dst_s, src, src_l, d), (S::BF16(dst), S::BF16(src)) => self.f(dst, dst_s, src, src_l, d), (S::F16(dst), S::F16(src)) => self.f(dst, dst_s, src, src_l, d), (S::F32(dst), S::F32(src)) => self.f(dst, dst_s, src, src_l, d), (S::F64(dst), S::F64(src)) => self.f(dst, dst_s, src, src_l, d), _ => 
Err(CudaError::InternalError("dtype mismatch in binary op"))?, } } } pub trait Map1Any { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, wrap: W, ) -> Result<S>; fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> { let out = match s { S::U8(s) => self.f(s, d, l, S::U8)?, S::U32(s) => self.f(s, d, l, S::U32)?, S::I64(s) => self.f(s, d, l, S::I64)?, S::BF16(s) => self.f(s, d, l, S::BF16)?, S::F16(s) => self.f(s, d, l, S::F16)?, S::F32(s) => self.f(s, d, l, S::F32)?, S::F64(s) => self.f(s, d, l, S::F64)?, }; Ok(out) } } pub trait Map2Any { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, dev: &CudaDevice, ) -> Result<S>; fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> { let out = match (s1, s2) { (S::U8(s1), S::U8(s2)) => self.f(s1, l1, s2, l2, d)?, (S::U32(s1), S::U32(s2)) => self.f(s1, l1, s2, l2, d)?, (S::I64(s1), S::I64(s2)) => self.f(s1, l1, s2, l2, d)?, (S::BF16(s1), S::BF16(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F16(s1), S::F16(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F32(s1), S::F32(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F64(s1), S::F64(s2)) => self.f(s1, l1, s2, l2, d)?, _ => Err(CudaError::InternalError("dtype mismatch in binary op")).w()?, }; Ok(out) } }
candle/candle-core/src/cuda_backend/utils.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/utils.rs", "repo_id": "candle", "token_count": 3748 }
//! Just enough pickle support to be able to read PyTorch checkpoints. // This hardcodes objects that are required for tensor reading, we may want to make this a bit more // composable/tensor agnostic at some point. use crate::{Context, DType, Error as E, Layout, Result, Tensor}; use byteorder::{LittleEndian, ReadBytesExt}; use std::collections::HashMap; use std::io::BufRead; const VERBOSE: bool = false; // https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/ #[repr(u8)] #[derive(Debug, Eq, PartialEq, Clone)] pub enum OpCode { // https://github.com/python/cpython/blob/ed25f097160b5cbb0c9a1f9a746d2f1bbc96515a/Lib/pickletools.py#L2123 Proto = 0x80, Global = b'c', BinPut = b'q', LongBinPut = b'r', EmptyTuple = b')', Reduce = b'R', Mark = b'(', BinUnicode = b'X', BinInt = b'J', Tuple = b't', BinPersId = b'Q', BinInt1 = b'K', BinInt2 = b'M', Tuple1 = 0x85, Tuple2 = 0x86, Tuple3 = 0x87, NewTrue = 0x88, NewFalse = 0x89, None = b'N', BinGet = b'h', LongBinGet = b'j', SetItem = b's', SetItems = b'u', EmptyDict = b'}', Dict = b'd', Build = b'b', Stop = b'.', NewObj = 0x81, EmptyList = b']', BinFloat = b'G', Append = b'a', Appends = b'e', } // Avoid using FromPrimitive so as not to drag another dependency. impl TryFrom<u8> for OpCode { type Error = u8; fn try_from(value: u8) -> std::result::Result<Self, Self::Error> { match value { 0x80 => Ok(Self::Proto), b'c' => Ok(Self::Global), b'q' => Ok(Self::BinPut), b'r' => Ok(Self::LongBinPut), b')' => Ok(Self::EmptyTuple), b'R' => Ok(Self::Reduce), b'(' => Ok(Self::Mark), b'X' => Ok(Self::BinUnicode), b'J' => Ok(Self::BinInt), b't' => Ok(Self::Tuple), b'Q' => Ok(Self::BinPersId), b'K' => Ok(Self::BinInt1), b'M' => Ok(Self::BinInt2), b'N' => Ok(Self::None), 0x85 => Ok(Self::Tuple1), 0x86 => Ok(Self::Tuple2), 0x87 => Ok(Self::Tuple3), 0x88 => Ok(Self::NewTrue), 0x89 => Ok(Self::NewFalse), b'h' => Ok(Self::BinGet), b'j' => Ok(Self::LongBinGet), b's' => Ok(Self::SetItem), b'u' => Ok(Self::SetItems), b'}' => Ok(Self::EmptyDict), b'd' => Ok(Self::EmptyDict), b'b' => Ok(Self::Build), b'.' 
=> Ok(Self::Stop), 0x81 => Ok(Self::NewObj), b']' => Ok(Self::EmptyList), b'G' => Ok(Self::BinFloat), b'a' => Ok(Self::Append), b'e' => Ok(Self::Appends), value => Err(value), } } } fn read_to_newline<R: BufRead>(r: &mut R) -> Result<Vec<u8>> { let mut data: Vec<u8> = Vec::with_capacity(32); r.read_until(b'\n', &mut data)?; data.pop(); if data.last() == Some(&b'\r') { data.pop(); } Ok(data) } #[derive(Debug, Clone, PartialEq)] pub enum Object { Class { module_name: String, class_name: String, }, Int(i32), Float(f64), Unicode(String), Bool(bool), None, Tuple(Vec<Object>), List(Vec<Object>), Mark, Dict(Vec<(Object, Object)>), Reduce { callable: Box<Object>, args: Box<Object>, }, Build { callable: Box<Object>, args: Box<Object>, }, PersistentLoad(Box<Object>), } type OResult<T> = std::result::Result<T, Object>; impl Object { pub fn unicode(self) -> OResult<String> { match self { Self::Unicode(t) => Ok(t), _ => Err(self), } } pub fn reduce(self) -> OResult<(Self, Self)> { match self { Self::Reduce { callable, args } => Ok((*callable, *args)), _ => Err(self), } } pub fn none(self) -> OResult<()> { match self { Self::None => Ok(()), _ => Err(self), } } pub fn persistent_load(self) -> OResult<Self> { match self { Self::PersistentLoad(t) => Ok(*t), _ => Err(self), } } pub fn bool(self) -> OResult<bool> { match self { Self::Bool(t) => Ok(t), _ => Err(self), } } pub fn int(self) -> OResult<i32> { match self { Self::Int(t) => Ok(t), _ => Err(self), } } pub fn tuple(self) -> OResult<Vec<Self>> { match self { Self::Tuple(t) => Ok(t), _ => Err(self), } } pub fn dict(self) -> OResult<Vec<(Self, Self)>> { match self { Self::Dict(t) => Ok(t), _ => Err(self), } } pub fn class(self) -> OResult<(String, String)> { match self { Self::Class { module_name, class_name, } => Ok((module_name, class_name)), _ => Err(self), } } pub fn into_tensor_info( self, name: Self, dir_name: &std::path::Path, ) -> Result<Option<TensorInfo>> { let name = match name.unicode() { Ok(name) => name, Err(_) => return Ok(None), }; let (callable, args) = match self.reduce() { Ok(callable_args) => callable_args, _ => return Ok(None), }; let (callable, args) = match callable { Object::Class { module_name, class_name, } if module_name == "torch._tensor" && class_name == "_rebuild_from_type_v2" => { let mut args = args.tuple()?; let callable = args.remove(0); let args = args.remove(1); (callable, args) } Object::Class { module_name, class_name, } if module_name == "torch._utils" && class_name == "_rebuild_parameter" => { let mut args = args.tuple()?; args.remove(0).reduce()? 
} _ => (callable, args), }; match callable { Object::Class { module_name, class_name, } if module_name == "torch._utils" && class_name == "_rebuild_tensor_v2" => {} _ => return Ok(None), }; let (layout, dtype, file_path, storage_size) = rebuild_args(args)?; Ok(Some(TensorInfo { name, dtype, layout, path: format!("{}/{}", dir_name.to_string_lossy(), file_path), storage_size, })) } } impl TryFrom<Object> for String { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Unicode(s) => Ok(s), other => Err(other), } } } impl TryFrom<Object> for usize { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Int(s) if s >= 0 => Ok(s as usize), other => Err(other), } } } impl<T: TryFrom<Object, Error = Object>> TryFrom<Object> for Vec<T> { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Tuple(values) => { // This does not return the appropriate value in the error case but instead return // the object related to the first error. values .into_iter() .map(|v| T::try_from(v)) .collect::<std::result::Result<Vec<T>, Self::Error>>() } other => Err(other), } } } #[derive(Debug)] pub struct Stack { stack: Vec<Object>, memo: HashMap<u32, Object>, } impl Stack { pub fn empty() -> Self { Self { stack: Vec::with_capacity(512), memo: HashMap::new(), } } pub fn stack(&self) -> &[Object] { self.stack.as_slice() } pub fn read_loop<R: BufRead>(&mut self, r: &mut R) -> Result<()> { loop { if self.read(r)? { break; } } Ok(()) } pub fn finalize(mut self) -> Result<Object> { self.pop() } fn push(&mut self, obj: Object) { self.stack.push(obj) } fn pop(&mut self) -> Result<Object> { match self.stack.pop() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } // https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/#Pickle.OpCodes.BUILD fn build(&mut self) -> Result<()> { let args = self.pop()?; let obj = self.pop()?; let obj = match (obj, args) { (Object::Dict(mut obj), Object::Dict(mut args)) => { obj.append(&mut args); Object::Dict(obj) } (obj, args) => Object::Build { callable: Box::new(obj), args: Box::new(args), }, }; self.push(obj); Ok(()) } fn reduce(&mut self) -> Result<()> { let args = self.pop()?; let callable = self.pop()?; #[allow(clippy::single_match)] let reduced = match &callable { Object::Class { module_name, class_name, } => { if module_name == "collections" && (class_name == "OrderedDict" || class_name == "defaultdict") { // TODO: have a separate ordered dict and a separate default dict. Some(Object::Dict(vec![])) } else { None } } _ => None, }; let reduced = reduced.unwrap_or_else(|| Object::Reduce { callable: Box::new(callable), args: Box::new(args), }); self.push(reduced); Ok(()) } fn last(&mut self) -> Result<&mut Object> { match self.stack.last_mut() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } fn memo_get(&self, id: u32) -> Result<Object> { match self.memo.get(&id) { None => crate::bail!("missing object in memo {id}"), Some(obj) => { // Maybe we should use refcounting rather than doing potential large clones here. 
Ok(obj.clone()) } } } fn memo_put(&mut self, id: u32) -> Result<()> { let obj = self.last()?.clone(); self.memo.insert(id, obj); Ok(()) } fn persistent_load(&self, id: Object) -> Result<Object> { Ok(Object::PersistentLoad(Box::new(id))) } fn new_obj(&self, class: Object, args: Object) -> Result<Object> { Ok(Object::Reduce { callable: Box::new(class), args: Box::new(args), }) } fn pop_to_marker(&mut self) -> Result<Vec<Object>> { let mut mark_idx = None; for (idx, obj) in self.stack.iter().enumerate().rev() { if obj == &Object::Mark { mark_idx = Some(idx); break; } } match mark_idx { Some(mark_idx) => { let objs = self.stack.split_off(mark_idx + 1); self.stack.pop(); Ok(objs) } None => { crate::bail!("marker object not found") } } } pub fn read<R: BufRead>(&mut self, r: &mut R) -> Result<bool> { let op_code = match OpCode::try_from(r.read_u8()?) { Ok(op_code) => op_code, Err(op_code) => { crate::bail!("unknown op-code {op_code}") } }; // println!("op: {op_code:?}"); // println!("{:?}", self.stack); match op_code { OpCode::Proto => { let version = r.read_u8()?; if VERBOSE { println!("proto {version}"); } } OpCode::Global => { let module_name = read_to_newline(r)?; let class_name = read_to_newline(r)?; let module_name = String::from_utf8_lossy(&module_name).to_string(); let class_name = String::from_utf8_lossy(&class_name).to_string(); self.push(Object::Class { module_name, class_name, }) } OpCode::BinInt1 => { let arg = r.read_u8()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt2 => { let arg = r.read_u16::<LittleEndian>()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt => { let arg = r.read_i32::<LittleEndian>()?; self.push(Object::Int(arg)) } OpCode::BinFloat => { // Somehow floats are encoded using BigEndian whereas int types use LittleEndian. 
// https://github.com/python/cpython/blob/0c80da4c14d904a367968955544dd6ae58c8101c/Lib/pickletools.py#L855 // https://github.com/pytorch/pytorch/blob/372d078f361e726bb4ac0884ac334b04c58179ef/torch/_weights_only_unpickler.py#L243 let arg = r.read_f64::<byteorder::BigEndian>()?; self.push(Object::Float(arg)) } OpCode::BinUnicode => { let len = r.read_u32::<LittleEndian>()?; let mut data = vec![0u8; len as usize]; r.read_exact(&mut data)?; let data = String::from_utf8(data).map_err(E::wrap)?; self.push(Object::Unicode(data)) } OpCode::BinPersId => { let id = self.pop()?; let obj = self.persistent_load(id)?; self.push(obj) } OpCode::Tuple => { let objs = self.pop_to_marker()?; self.push(Object::Tuple(objs)) } OpCode::Tuple1 => { let obj = self.pop()?; self.push(Object::Tuple(vec![obj])) } OpCode::Tuple2 => { let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2])) } OpCode::Tuple3 => { let obj3 = self.pop()?; let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2, obj3])) } OpCode::NewTrue => self.push(Object::Bool(true)), OpCode::NewFalse => self.push(Object::Bool(false)), OpCode::Append => { let value = self.pop()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.push(value) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::Appends => { let objs = self.pop_to_marker()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.extend(objs) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::SetItem => { let value = self.pop()?; let key = self.pop()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { d.push((key, value)) } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::SetItems => { let mut objs = self.pop_to_marker()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().context("empty objs")?; d.push((key, value)) } } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::None => self.push(Object::None), OpCode::Stop => { return Ok(true); } OpCode::Build => self.build()?, OpCode::EmptyDict => self.push(Object::Dict(vec![])), OpCode::Dict => { let mut objs = self.pop_to_marker()?; let mut pydict = vec![]; if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().context("empty objs")?; pydict.push((key, value)) } self.push(Object::Dict(pydict)) } OpCode::Mark => self.push(Object::Mark), OpCode::Reduce => self.reduce()?, OpCode::EmptyTuple => self.push(Object::Tuple(vec![])), OpCode::EmptyList => self.push(Object::List(vec![])), OpCode::BinGet => { let arg = r.read_u8()?; let obj = self.memo_get(arg as u32)?; self.push(obj) } OpCode::LongBinGet => { let arg = r.read_u32::<LittleEndian>()?; let obj = self.memo_get(arg)?; self.push(obj) } OpCode::BinPut => { let arg = r.read_u8()?; self.memo_put(arg as u32)? } OpCode::LongBinPut => { let arg = r.read_u32::<LittleEndian>()?; self.memo_put(arg)? 
} OpCode::NewObj => { let args = self.pop()?; let class = self.pop()?; let obj = self.new_obj(class, args)?; self.push(obj) } } Ok(false) } } impl From<Object> for E { fn from(value: Object) -> Self { E::Msg(format!("conversion error on {value:?}")) } } // https://github.com/pytorch/pytorch/blob/4eac43d046ded0f0a5a5fa8db03eb40f45bf656e/torch/_utils.py#L198 // Arguments: storage, storage_offset, size, stride, requires_grad, backward_hooks fn rebuild_args(args: Object) -> Result<(Layout, DType, String, usize)> { let mut args = args.tuple()?; let stride = Vec::<usize>::try_from(args.remove(3))?; let size = Vec::<usize>::try_from(args.remove(2))?; let offset = args.remove(1).int()? as usize; let storage = args.remove(0).persistent_load()?; let mut storage = storage.tuple()?; let storage_size = storage.remove(4).int()? as usize; let path = storage.remove(2).unicode()?; let (_module_name, class_name) = storage.remove(1).class()?; let dtype = match class_name.as_str() { "FloatStorage" => DType::F32, "DoubleStorage" => DType::F64, "HalfStorage" => DType::F16, "BFloat16Storage" => DType::BF16, "ByteStorage" => DType::U8, "LongStorage" => DType::I64, other => { crate::bail!("unsupported storage type {other}") } }; let layout = Layout::new(crate::Shape::from(size), stride, offset); Ok((layout, dtype, path, storage_size)) } #[derive(Debug, Clone)] pub struct TensorInfo { pub name: String, pub dtype: DType, pub layout: Layout, pub path: String, pub storage_size: usize, } /// Read the tensor info from a .pth file. /// /// # Arguments /// * `file` - The path to the .pth file. /// * `verbose` - Whether to print debug information. /// * `key` - Optional key to retrieve `state_dict` from the pth file. pub fn read_pth_tensor_info<P: AsRef<std::path::Path>>( file: P, verbose: bool, key: Option<&str>, ) -> Result<Vec<TensorInfo>> { let file = std::fs::File::open(file)?; let zip_reader = std::io::BufReader::new(file); let mut zip = zip::ZipArchive::new(zip_reader)?; let zip_file_names = zip .file_names() .map(|f| f.to_string()) .collect::<Vec<String>>(); let mut tensor_infos = vec![]; for file_name in zip_file_names.iter() { if !file_name.ends_with("data.pkl") { continue; } let dir_name = std::path::PathBuf::from(file_name.strip_suffix(".pkl").context("no .pkl")?); let reader = zip.by_name(file_name)?; let mut reader = std::io::BufReader::new(reader); let mut stack = Stack::empty(); stack.read_loop(&mut reader)?; let obj = stack.finalize()?; if VERBOSE || verbose { println!("{obj:#?}"); } let obj = match obj { Object::Build { callable, args } => match *callable { Object::Reduce { callable, args: _ } => match *callable { Object::Class { module_name, class_name, } if module_name == "__torch__" && class_name == "Module" => *args, _ => continue, }, _ => continue, }, obj => obj, }; // If key is provided, then we need to extract the state_dict from the object. let obj = if let Some(key) = key { if let Object::Dict(key_values) = obj { key_values .into_iter() .find(|(k, _)| *k == Object::Unicode(key.to_owned())) .map(|(_, v)| v) .ok_or_else(|| E::Msg(format!("key {key} not found")))? } else { obj } } else { obj }; // If the object is a dict, then we can extract the tensor info from it. // NOTE: We are assuming that the `obj` is state_dict by this stage. 
if let Object::Dict(key_values) = obj { for (name, value) in key_values.into_iter() { match value.into_tensor_info(name, &dir_name) { Ok(Some(tensor_info)) => tensor_infos.push(tensor_info), Ok(None) => {} Err(err) => eprintln!("skipping: {err:?}"), } } } } Ok(tensor_infos) } /// Lazy tensor loader. pub struct PthTensors { tensor_infos: HashMap<String, TensorInfo>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. } impl PthTensors { pub fn new<P: AsRef<std::path::Path>>(path: P, key: Option<&str>) -> Result<Self> { let tensor_infos = read_pth_tensor_info(path.as_ref(), false, key)?; let tensor_infos = tensor_infos .into_iter() .map(|ti| (ti.name.to_string(), ti)) .collect(); let path = path.as_ref().to_owned(); Ok(Self { tensor_infos, path }) } pub fn tensor_infos(&self) -> &HashMap<String, TensorInfo> { &self.tensor_infos } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { use std::io::Read; let tensor_info = match self.tensor_infos.get(name) { None => return Ok(None), Some(tensor_info) => tensor_info, }; // We hope that the file has not changed since first reading it. let zip_reader = std::io::BufReader::new(std::fs::File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_name(&tensor_info.path)?; let is_fortran_contiguous = tensor_info.layout.is_fortran_contiguous(); let rank = tensor_info.layout.shape().rank(); // Reading the data is a bit tricky as it can be strided, for now only support the basic // case and when the tensor is fortran contiguous. if !tensor_info.layout.is_contiguous() && !is_fortran_contiguous { crate::bail!( "cannot retrieve non-contiguous tensors {:?}", tensor_info.layout ) } let start_offset = tensor_info.layout.start_offset(); if start_offset > 0 { std::io::copy( &mut reader.by_ref().take(start_offset as u64), &mut std::io::sink(), )?; } let tensor = Tensor::from_reader( tensor_info.layout.shape().clone(), tensor_info.dtype, &mut reader, )?; if rank > 1 && is_fortran_contiguous { // Reverse the shape, e.g. Shape(2, 3, 4) -> Shape(4, 3, 2) let shape_reversed: Vec<_> = tensor_info.layout.dims().iter().rev().cloned().collect(); let tensor = tensor.reshape(shape_reversed)?; // Permute (transpose) the dimensions, e.g. Shape(4, 3, 2) -> Shape(2, 3, 4) let dim_indeces_reversed: Vec<_> = (0..rank).rev().collect(); let tensor = tensor.permute(dim_indeces_reversed)?; Ok(Some(tensor)) } else { Ok(Some(tensor)) } } } /// Read all the tensors from a PyTorch pth file with a given key. /// /// # Arguments /// * `path` - Path to the pth file. /// * `key` - Optional key to retrieve `state_dict` from the pth file. Sometimes the pth file /// contains multiple objects and the state_dict is the one we are interested in. pub fn read_all_with_key<P: AsRef<std::path::Path>>( path: P, key: Option<&str>, ) -> Result<Vec<(String, Tensor)>> { let pth = PthTensors::new(path, key)?; let tensor_names = pth.tensor_infos.keys(); let mut tensors = Vec::with_capacity(tensor_names.len()); for name in tensor_names { if let Some(tensor) = pth.get(name)? { tensors.push((name.to_string(), tensor)) } } Ok(tensors) } /// Read all the tensors from a PyTorch pth file. /// /// # Arguments /// * `path` - Path to the pth file. pub fn read_all<P: AsRef<std::path::Path>>(path: P) -> Result<Vec<(String, Tensor)>> { read_all_with_key(path, None) }
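A minimal usage sketch for the readers above; `model.pth`, `checkpoint.pth`, the `state_dict` key and the `embedding.weight` tensor name are placeholders for whatever checkpoint is actually at hand.

```rust
use candle_core::pickle;

fn main() -> candle_core::Result<()> {
    // Eagerly read every tensor stored in a PyTorch checkpoint.
    let tensors = pickle::read_all("model.pth")?;
    for (name, tensor) in tensors.iter() {
        println!("{name}: {:?} {:?}", tensor.dtype(), tensor.shape());
    }

    // Or pick the state_dict out of a checkpoint that stores several objects.
    let tensors = pickle::read_all_with_key("checkpoint.pth", Some("state_dict"))?;
    println!("loaded {} tensors", tensors.len());

    // Lazy variant: only the requested tensor is extracted from the zip archive.
    let pth = pickle::PthTensors::new("model.pth", None)?;
    if let Some(t) = pth.get("embedding.weight")? {
        println!("embedding.weight: {:?}", t.shape());
    }
    Ok(())
}
```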
candle/candle-core/src/pickle.rs/0
{ "file_path": "candle/candle-core/src/pickle.rs", "repo_id": "candle", "token_count": 14325 }
use crate::{Result, Tensor}; use rayon::prelude::*; #[derive(Debug, Clone, Copy)] struct ArgSort { asc: bool, last_dim: usize, } impl ArgSort { fn asort<T: crate::WithDType>(&self, vs: &[T], layout: &crate::Layout) -> Vec<u32> { #[allow(clippy::uninit_vec)] // Safety: indexes are set later in the parallelized section. let mut sort_indexes = unsafe { let el_count = layout.shape().elem_count(); let mut v = Vec::with_capacity(el_count); v.set_len(el_count); v }; if self.asc { sort_indexes .par_chunks_exact_mut(self.last_dim) .zip(vs.par_chunks_exact(self.last_dim)) .for_each(|(indexes, vs)| { indexes .iter_mut() .enumerate() .for_each(|(i, v)| *v = i as u32); indexes.sort_by(|&i, &j| { vs[i as usize] .partial_cmp(&vs[j as usize]) .unwrap_or(std::cmp::Ordering::Greater) }) }); } else { sort_indexes .par_chunks_exact_mut(self.last_dim) .zip(vs.par_chunks_exact(self.last_dim)) .for_each(|(indexes, vs)| { indexes .iter_mut() .enumerate() .for_each(|(i, v)| *v = i as u32); indexes.sort_by(|&j, &i| { vs[i as usize] .partial_cmp(&vs[j as usize]) .unwrap_or(std::cmp::Ordering::Greater) }) }); } sort_indexes } } #[cfg(feature = "cuda")] mod cuda { use super::*; use crate::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, ValidAsZeroBits, }; use crate::cuda_backend::{kernel_name, kernels, CudaStorageSlice as S, WrapErr}; use crate::{CudaDevice, WithDType}; impl crate::cuda_backend::Map1Any for ArgSort { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &crate::Layout, _wrap: W, ) -> Result<S> { let slice = match layout.contiguous_offsets() { None => crate::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let elem_count = layout.shape().elem_count(); let dst = unsafe { dev.alloc::<u32>(elem_count) }.w()?; let func = if self.asc { dev.get_or_load_func(&kernel_name::<T>("asort_asc"), kernels::SORT)? } else { dev.get_or_load_func(&kernel_name::<T>("asort_desc"), kernels::SORT)? 
}; let ncols = self.last_dim; let nrows = elem_count / ncols; let ncols_pad = next_power_of_2(ncols); let params = (&slice, &dst, ncols as i32, ncols_pad as i32); let cfg = LaunchConfig { grid_dim: (1, nrows as u32, 1), block_dim: (ncols_pad as u32, 1, 1), shared_mem_bytes: (ncols_pad * std::mem::size_of::<u32>()) as u32, }; unsafe { func.launch(cfg, params) }.w()?; Ok(S::U32(dst)) } } } impl crate::CustomOp1 for ArgSort { fn name(&self) -> &'static str { "argsort" } fn cpu_fwd( &self, storage: &crate::CpuStorage, layout: &crate::Layout, ) -> Result<(crate::CpuStorage, crate::Shape)> { let sort_indexes = match storage { crate::CpuStorage::U8(vs) => self.asort(vs, layout), crate::CpuStorage::U32(vs) => self.asort(vs, layout), crate::CpuStorage::I64(vs) => self.asort(vs, layout), crate::CpuStorage::BF16(vs) => self.asort(vs, layout), crate::CpuStorage::F16(vs) => self.asort(vs, layout), crate::CpuStorage::F32(vs) => self.asort(vs, layout), crate::CpuStorage::F64(vs) => self.asort(vs, layout), }; let sort_indexes = crate::CpuStorage::U32(sort_indexes); Ok((sort_indexes, layout.shape().into())) } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &crate::CudaStorage, layout: &crate::Layout, ) -> Result<(crate::CudaStorage, crate::Shape)> { use crate::backend::BackendStorage; use crate::cuda_backend::Map1Any; let dev = storage.device(); let slice = self.map(&storage.slice, dev, layout)?; let dst = crate::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, layout.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, storage: &crate::MetalStorage, layout: &crate::Layout, ) -> Result<(crate::MetalStorage, crate::Shape)> { use crate::backend::BackendStorage; use crate::DType; let name = { if self.asc { match storage.dtype() { DType::BF16 => "asort_asc_bf16", DType::F16 => "asort_asc_f16", DType::F32 => "asort_asc_f32", DType::F64 => "asort_asc_f64", DType::U8 => "asort_asc_u8", DType::U32 => "asort_asc_u32", DType::I64 => "asort_asc_i64", } } else { match storage.dtype() { DType::BF16 => "asort_desc_bf16", DType::F16 => "asort_desc_f16", DType::F32 => "asort_desc_f32", DType::F64 => "asort_desc_f64", DType::U8 => "asort_desc_u8", DType::U32 => "asort_desc_u32", DType::I64 => "asort_desc_i64", } } }; let device = storage.device(); let kernels = device.kernels(); let command_buffer = device.command_buffer()?; let el = layout.shape().elem_count(); let ncols = self.last_dim; let nrows = el / ncols; let src = crate::metal_backend::buffer_o(storage.buffer(), layout, storage.dtype()); let dst = device.new_buffer(el, DType::U32, "asort")?; let mut ncols_pad = 1; while ncols_pad < ncols { ncols_pad *= 2; } candle_metal_kernels::call_arg_sort( device.metal_device(), &command_buffer, kernels, name, nrows, ncols, ncols_pad, src, &dst, ) .map_err(crate::Error::wrap)?; let dst = crate::MetalStorage::new(dst, device.clone(), el, DType::U32); Ok((dst, layout.shape().clone())) } } #[allow(unused)] fn next_power_of_2(x: usize) -> usize { let mut n = 1; while n < x { n *= 2 } n } impl Tensor { /// Returns the indices that sort the tensor along the last dimension. /// /// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in /// descending order. The sort is unstable so there is no guarantees on the final order when it /// comes to ties. 
pub fn arg_sort_last_dim(&self, asc: bool) -> Result<Tensor> { if !self.is_contiguous() { return Err(crate::Error::RequiresContiguous { op: "arg_sort_last_dim", }); } let last_dim = match self.dims().last() { None => crate::bail!("empty last-dim in arg-sort"), Some(last_dim) => *last_dim, }; // No need for a backward pass for arg sort. self.apply_op1_no_bwd(&ArgSort { asc, last_dim }) } /// Sorts the tensor along the last dimension, returns the sorted tensor together with the /// sorted indexes. /// /// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in /// descending order. The sort is unstable so there is no guarantees on the final order when it /// comes to ties. pub fn sort_last_dim(&self, asc: bool) -> Result<(Tensor, Tensor)> { if !self.is_contiguous() { return Err(crate::Error::RequiresContiguous { op: "sort_last_dim", }); } let asort = self.arg_sort_last_dim(asc)?; let sorted = self.gather(&asort, crate::D::Minus1)?; Ok((sorted, asort)) } }
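A small usage sketch for the two public methods above, on a CPU tensor with illustrative values:

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    let device = Device::Cpu;
    let t = Tensor::new(&[[3f32, 1., 2.], [0., 5., 4.]], &device)?;

    // Indices that sort each row in ascending order, returned as a u32 tensor.
    let idx = t.arg_sort_last_dim(true)?;
    println!("{idx}");

    // Sorted values together with the sorting indices, in descending order.
    let (sorted, idx) = t.sort_last_dim(false)?;
    println!("{sorted}\n{idx}");
    Ok(())
}
```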
candle/candle-core/src/sort.rs/0
{ "file_path": "candle/candle-core/src/sort.rs", "repo_id": "candle", "token_count": 4723 }
use candle_core::{test_device, DType, Device, IndexOp, Result, Tensor}; fn matmul(device: &Device) -> Result<()> { let data = vec![1.0f32, 2.0, 3.0, 4.0]; let a = Tensor::from_slice(&data, (2, 2), device)?; let data = vec![1.0f32, 2.0, 3.0, 4.0]; let b = Tensor::from_slice(&data, (2, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]); let data = vec![1.0f32, 2.0]; let a = Tensor::from_slice(&data, (2, 1), device)?; let data = vec![3.0f32, 4.0]; let b = Tensor::from_slice(&data, (1, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[3.0, 4.0], &[6.0, 8.0]]); let data: Vec<_> = (0..6).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 3), device)?; let data: Vec<_> = (0..6).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (3, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[16., 19.], &[52., 64.]]); let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 2, 3), device)?; let data: Vec<_> = (0..12).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (2, 3, 2), device)?; let expected = [[[16., 19.], [52., 64.]], [[214., 235.], [304., 334.]]]; let c = a.matmul(&b)?; assert_eq!(c.to_vec3::<f32>()?, &expected); // Also perform the matmul on contiguous transposed versions. let a_tt = a.t()?.contiguous()?.t()?; assert!(!a_tt.is_contiguous()); assert_eq!(a.dims(), a_tt.dims()); assert_eq!(a_tt.stride(), &[6, 1, 2]); let b_tt = b.t()?.contiguous()?.t()?; assert!(!b_tt.is_contiguous()); assert_eq!(b.dims(), b_tt.dims()); assert_eq!(b_tt.stride(), &[6, 1, 3]); assert_eq!(a_tt.matmul(&b)?.to_vec3::<f32>()?, &expected); assert_eq!(a.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); assert_eq!(a_tt.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); Ok(()) } fn matmul_bf16(device: &Device) -> Result<()> { if !device.supports_bf16() { return Ok(()); } let data = vec![1.0f32, 2.0, 3.0, 4.0]; let a = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?; let data = vec![1.0f32, 2.0, 3.0, 4.0]; let b = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?; let c = a.matmul(&b)?.to_dtype(DType::F32)?; assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]); Ok(()) } fn broadcast_matmul(device: &Device) -> Result<()> { let lhs = Tensor::randn(0f32, 1f32, (3, 1, 4, 5), device)?; let rhs = Tensor::randn(0f32, 1f32, (6, 5, 2), device)?; let out = lhs.broadcast_matmul(&rhs)?; assert_eq!(out.dims(), &[3, 6, 4, 2]); for idx1 in 0..3 { for idx2 in 0..6 { let out = out.i((idx1, idx2))?; let lhs = lhs.i((idx1, 0))?; let rhs = rhs.i(idx2)?; let out2 = lhs.matmul(&rhs); let sum_diff2 = (out - out2)?.sqr()?.sum_all()?; // With cuda, we see errors of up to ~1e-12. assert!(sum_diff2.to_vec0::<f32>()? 
< 1e-6) } } Ok(()) } // https://github.com/huggingface/candle/issues/1948 fn squeeze_mm(device: &Device) -> Result<()> { let seq_len = 8_usize; let a = Tensor::zeros((1, seq_len, 16), DType::F32, device)?; let x = a.i((.., seq_len - 1, ..))?; let w = Tensor::zeros((32, 16), DType::F32, device)?.t()?; let x = x.matmul(&w)?; assert_eq!(x.dims(), &[1, 32]); Ok(()) } // https://github.com/huggingface/candle/issues/1992 fn mm_layout(device: &Device) -> Result<()> { let a = Tensor::arange(0f32, 16f32, device)?.reshape((1, 1, 4, 4))?; let b = Tensor::arange(0f32, 8f32, device)?.reshape((1, 1, 4, 2))?; let mm1 = a.matmul(&b)?; // Forces the layout to be: // shape: [1, 1, 4, 2], stride: [8, 2, 2, 1], start_offset: 0 // This is still a contiguous matrix but matmul checks are only the two last dimensions have // non 1 sizes but matmul check may be reluctant to handle it. let b = b.transpose(1, 2)?.force_contiguous()?.transpose(1, 2)?; let mm2 = a.matmul(&b)?; let diff = (mm1 - mm2)?.abs()?.sum_all()?.to_vec0::<f32>()?; assert_eq!(diff, 0.); Ok(()) } test_device!(matmul, matmul_cpu, matmul_gpu, matmul_metal); test_device!( matmul_bf16, matmul_bf16_cpu, matmul_bf16_gpu, matmul_bf16_metal ); test_device!( broadcast_matmul, broadcast_matmul_cpu, broadcast_matmul_gpu, broadcast_matmul_metal ); test_device!(squeeze_mm, squeeze_mm_cpu, squeeze_mm_gpu, squeeze_mm_metal); test_device!(mm_layout, mm_layout_cpu, mm_layout_gpu, mm_layout_metal);
candle/candle-core/tests/matmul_tests.rs/0
{ "file_path": "candle/candle-core/tests/matmul_tests.rs", "repo_id": "candle", "token_count": 2363 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::bigcode::{Config, GPTBigCode}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: GPTBigCode, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, } impl TextGeneration { fn new( model: GPTBigCode, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); print!("{prompt}"); std::io::stdout().flush()?; let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let (context_size, past_len) = if self.model.config().use_cache && index > 0 { (1, tokens.len().saturating_sub(1)) } else { (tokens.len(), 0) }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, past_len)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({:.3} token/s)", sample_len as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "bigcode/starcoderbase-1b")] model_id: String, #[arg(long, default_value = "main")] revision: String, #[arg(long)] weight_file: Option<String>, } fn main() -> Result<()> { let args = Args::parse(); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = match args.weight_file { Some(weight_file) => vec![std::path::PathBuf::from(weight_file)], None => ["model.safetensors"] .iter() .map(|f| repo.get(f)) .collect::<std::result::Result<Vec<_>, _>>()?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? 
}; let config = Config::starcoder_1b(); let model = GPTBigCode::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/bigcode/main.rs/0
{ "file_path": "candle/candle-examples/examples/bigcode/main.rs", "repo_id": "candle", "token_count": 2134 }
/* * Adapted from * https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/reduce_kernel_utils.cuh * Copyright (c) 2023, The vLLM team. * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once template <typename T> __inline__ __device__ T warpReduceSum(T val) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(0xffffffff, val, mask, 32); return val; } /* Calculate the sum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceSum(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; int wid = threadIdx.x >> 5; val = warpReduceSum<T>(val); if (lane == 0) shared[wid] = val; __syncthreads(); // Use blockDim.x / 32.f rather than blockDim.x >> 5 so that this also works // when blockDim.x is not divisible by 32 val = (threadIdx.x < (blockDim.x / 32.f)) ? shared[lane] : (T)(0.0f); val = warpReduceSum<T>(val); return val; }
candle/candle-examples/examples/custom-ops/kernels/reduction_utils.cuh/0
{ "file_path": "candle/candle-examples/examples/custom-ops/kernels/reduction_utils.cuh", "repo_id": "candle", "token_count": 529 }
# candle-encodec [EnCodec](https://huggingface.co/facebook/encodec_24khz) is a high-quality audio compression model using an encoder/decoder architecture with residual vector quantization. ## Running one example ```bash cargo run --example encodec --features encodec --release -- code-to-audio \ candle-examples/examples/encodec/jfk-codes.safetensors \ jfk.wav ``` This decodes the EnCodec tokens stored in `jfk-codes.safetensors` and generates an output wav file containing the audio data. Instead of `code-to-audio` one can use: - `audio-to-audio in.mp3 out.wav`: encodes the input audio file and then decodes it to a wav file. - `audio-to-code in.mp3 out.safetensors`: generates a safetensors file containing EnCodec tokens for the input audio file. If the audio output file name is set to `-`, the audio is played directly on the default audio output device. If the audio input file name is set to `-`, the audio is recorded from the default audio input device.
candle/candle-examples/examples/encodec/README.md/0
{ "file_path": "candle/candle-examples/examples/encodec/README.md", "repo_id": "candle", "token_count": 305 }
* GLM4 GLM-4-9B is the open-source version of the latest generation of pre-trained models in the GLM-4 series launched by Zhipu AI. - [[https://github.com/THUDM/GLM4][Github]] - [[https://huggingface.co/THUDM/glm-4-9b][huggingface]] ** Running with ~cuda~ #+begin_src shell cargo run --example glm4 --release --features cuda -- --prompt "Hello world" #+end_src ** Running with ~cpu~ #+begin_src shell cargo run --example glm4 --release -- --cpu --prompt "Hello world" #+end_src ** Output Example #+begin_src shell cargo run --features cuda -r --example glm4 -- --prompt "Hello " avx: true, neon: false, simd128: false, f16c: true temp: 0.60 repeat-penalty: 1.20 repeat-last-n: 64 retrieved the files in 6.454375ms loaded the model in 3.652383779s starting the inference loop Hello 2018, hello new year! I’m so excited to be back and sharing with you all my favorite things from the past month. This is a monthly series where I share what’s been inspiring me lately in hopes that it will inspire you too! ... #+end_src This example will read the prompt from stdin. * Citation #+begin_src @misc{glm2024chatglm, title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools}, author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang}, year={2024}, eprint={2406.12793}, archivePrefix={arXiv}, primaryClass={cs.CL} } #+end_src #+begin_src @misc{wang2023cogvlm, title={CogVLM: Visual Expert for Pretrained Language Models}, author={Weihan Wang and Qingsong Lv and Wenmeng Yu and Wenyi Hong and Ji Qi and Yan Wang and Junhui Ji and Zhuoyi Yang and Lei Zhao and Xixuan Song and Jiazheng Xu and Bin Xu and Juanzi Li and Yuxiao Dong and Ming Ding and Jie Tang}, year={2023}, eprint={2311.03079}, archivePrefix={arXiv}, primaryClass={cs.CV} } #+end_src
candle/candle-examples/examples/glm4/README.org/0
{ "file_path": "candle/candle-examples/examples/glm4/README.org", "repo_id": "candle", "token_count": 1034 }
use candle::backend::BackendStorage; use candle::{CpuStorage, CustomOp1, DType, Device, IndexOp, Layout, Result, Shape, Tensor, D}; use candle_nn::var_builder::ShardedVarBuilder as VarBuilder; use candle_nn::{Embedding, Linear, Module, RmsNorm}; use cudarc::nccl::safe::{Comm, ReduceOp}; use std::rc::Rc; use std::sync::{Arc, Mutex}; use super::MAX_SEQ_LEN; pub type Config = candle_transformers::models::llama::LlamaConfig; struct TensorParallelColumnLinear { linear: Linear, } impl TensorParallelColumnLinear { fn new(linear: Linear) -> Self { Self { linear } } fn forward(&self, x: &Tensor) -> Result<Tensor> { self.linear.forward(x) } } struct TensorParallelRowLinear { linear: Linear, all_reduce: AllReduce, } struct AllReduce { comm: Rc<Comm>, } /// This is actually not safe: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/threadsafety.html /// But for this example purposes, this will work unsafe impl Sync for AllReduce {} unsafe impl Send for AllReduce {} impl CustomOp1 for AllReduce { fn name(&self) -> &'static str { "allreduce" } fn cpu_fwd(&self, _s: &CpuStorage, _l: &Layout) -> Result<(CpuStorage, Shape)> { candle::bail!("AllReduce is never used on cpu") } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s: &candle::CudaStorage, l: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::WrapErr; use cudarc::driver::DeviceSlice; use half::{bf16, f16}; let elem_count = l.shape().elem_count(); let dev = s.device().clone(); let dst = match s.dtype() { DType::BF16 => { let s = s.as_cuda_slice::<bf16>()?; let s = match l.contiguous_offsets() { Some((0, l)) if l == s.len() => s, Some(_) | None => candle::bail!("input has to be contiguous"), }; let mut dst = unsafe { dev.alloc::<bf16>(elem_count) }.w()?; self.comm .all_reduce(s, &mut dst, &ReduceOp::Sum) .map_err(candle::Error::debug)?; candle::CudaStorage::wrap_cuda_slice(dst, dev) } DType::F16 => { let s = s.as_cuda_slice::<f16>()?; let s = match l.contiguous_offsets() { Some((0, l)) if l == s.len() => s, Some(_) | None => candle::bail!("input has to be contiguous"), }; let mut dst = unsafe { dev.alloc::<f16>(elem_count) }.w()?; self.comm .all_reduce(s, &mut dst, &ReduceOp::Sum) .map_err(candle::Error::debug)?; candle::CudaStorage::wrap_cuda_slice(dst, dev) } dtype => candle::bail!("unsupported dtype {dtype:?}"), }; Ok((dst, l.shape().clone())) } } impl TensorParallelRowLinear { fn new(linear: Linear, comm: Rc<Comm>) -> Self { let all_reduce = AllReduce { comm }; Self { linear, all_reduce } } fn forward(&self, x: &Tensor) -> Result<Tensor> { self.linear.forward(x)?.apply_op1_no_bwd(&self.all_reduce) } } fn shard(dim: usize, rank: usize, world_size: usize) -> candle_nn::var_builder::Shard { candle_nn::var_builder::Shard { dim, rank, world_size, } } impl TensorParallelColumnLinear { fn load(vb: VarBuilder, comm: Rc<Comm>) -> Result<Self> { let rank = comm.rank(); let size = comm.world_size(); let weight = vb.get_with_hints((), "weight", shard(0, rank, size))?; Ok(Self::new(Linear::new(weight, None))) } fn load_multi(vb: VarBuilder, prefixes: &[&str], comm: Rc<Comm>) -> Result<Self> { let rank = comm.rank(); let size = comm.world_size(); let weights: Vec<_> = prefixes .iter() .map(|p| vb.pp(p).get_with_hints((), "weight", shard(0, rank, size))) .collect::<Result<Vec<_>>>()?; let weight = Tensor::cat(&weights, 0)?; Ok(Self::new(Linear::new(weight, None))) } } impl TensorParallelRowLinear { fn load(vb: VarBuilder, comm: Rc<Comm>) -> Result<Self> { let rank = comm.rank(); let size = comm.world_size(); let weight = 
vb.get_with_hints((), "weight", shard(1, rank, size))?; Ok(Self::new(Linear::new(weight, None), comm)) } } #[derive(Clone)] pub struct Cache { #[allow(clippy::type_complexity)] kvs: Arc<Mutex<Vec<Option<(Tensor, Tensor)>>>>, cos: Tensor, sin: Tensor, } impl Cache { pub fn new(dtype: DType, config: &Config, device: &Device) -> Result<Self> { // precompute freqs_cis let n_elem = config.hidden_size / config.num_attention_heads; let theta: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / config.rope_theta.powf(i as f32 / n_elem as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)? .to_dtype(DType::F32)? .reshape((MAX_SEQ_LEN, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; // This is different from the paper, see: // https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112 let cos = idx_theta.cos()?.to_dtype(dtype)?; let sin = idx_theta.sin()?.to_dtype(dtype)?; Ok(Self { kvs: Arc::new(Mutex::new(vec![None; config.num_hidden_layers])), cos, sin, }) } } fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? } fn linear(size1: usize, size2: usize, vb: VarBuilder) -> Result<Linear> { let weight = vb.get((size2, size1), "weight")?; Ok(Linear::new(weight, None)) } fn embedding(cfg: &Config, vb: VarBuilder) -> Result<Embedding> { let embeddings = vb.get((cfg.vocab_size, cfg.hidden_size), "weight")?; Ok(Embedding::new(embeddings, cfg.hidden_size)) } struct CausalSelfAttention { qkv_proj: TensorParallelColumnLinear, o_proj: TensorParallelRowLinear, num_attention_heads: usize, num_key_value_heads: usize, head_dim: usize, cache: Cache, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, _, seq_len, _hidden_size) = x.shape().dims4()?; let cos = self.cache.cos.narrow(0, index_pos, seq_len)?; let sin = self.cache.sin.narrow(0, index_pos, seq_len)?; candle_nn::rotary_emb::rope(x, &cos, &sin) } fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> { let (b_sz, seq_len, _) = x.shape().dims3()?; let qkv = self.qkv_proj.forward(x)?; let hidden_size = self.num_attention_heads * self.head_dim; let q = qkv.i((.., .., ..self.num_attention_heads * self.head_dim))?; let k = qkv.i(( .., .., self.num_attention_heads * self.head_dim ..self.num_attention_heads * self.head_dim + self.num_key_value_heads * self.head_dim, ))?; let v = qkv.i(( .., .., self.num_attention_heads * self.head_dim + self.num_key_value_heads * self.head_dim.., ))?; // todo!("Q {:?} K {:?} V {:?} - x {:?}", q.shape(), k.shape(), v.shape(), x.shape()); let q = q .reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let k = k .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let mut v = v .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let q = self.apply_rotary_emb(&q, index_pos)?; let mut k = self.apply_rotary_emb(&k, index_pos)?; let mut cache = self.cache.kvs.lock().unwrap(); if let Some((cache_k, cache_v)) = &cache[block_idx] { k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?; let k_seq_len = k.dims()[1]; if k_seq_len > MAX_SEQ_LEN { k = k .narrow(D::Minus1, k_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)? .contiguous()? 
} let v_seq_len = v.dims()[1]; if v_seq_len > 2 * MAX_SEQ_LEN { v = v .narrow(D::Minus1, v_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)? .contiguous()? } } cache[block_idx] = Some((k.clone(), v.clone())); let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); let y = candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)? .reshape((b_sz, seq_len, hidden_size))?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.num_attention_heads / self.num_key_value_heads; candle_transformers::utils::repeat_kv(x, n_rep) } fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let qkv_proj = TensorParallelColumnLinear::load_multi( vb.clone(), &["q_proj", "k_proj", "v_proj"], comm.clone(), )?; let o_proj = TensorParallelRowLinear::load(vb.pp("o_proj"), comm.clone())?; Ok(Self { qkv_proj, o_proj, num_attention_heads: cfg.num_attention_heads / comm.world_size(), num_key_value_heads: cfg.num_key_value_heads() / comm.world_size(), head_dim: cfg.hidden_size / cfg.num_attention_heads, cache: cache.clone(), }) } } struct Mlp { c_fc1: TensorParallelColumnLinear, c_fc2: TensorParallelColumnLinear, c_proj: TensorParallelRowLinear, } impl Mlp { fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, _cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let c_fc1 = TensorParallelColumnLinear::load(vb.pp("gate_proj"), comm.clone())?; let c_fc2 = TensorParallelColumnLinear::load(vb.pp("up_proj"), comm.clone())?; let c_proj = TensorParallelRowLinear::load(vb.pp("down_proj"), comm)?; Ok(Self { c_fc1, c_fc2, c_proj, }) } } struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } fn rms_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> { let weight = vb.get_with_hints(size, "weight", shard(0, 0, 1))?; Ok(RmsNorm::new(weight, eps)) } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? 
+ residual)?; Ok(x) } fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cache, cfg, comm.clone())?; let mlp = Mlp::load(vb.pp("mlp"), cfg, comm)?; let input_layernorm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("input_layernorm"))?; let post_attention_layernorm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } pub struct Llama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, } impl Llama { fn new(wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear) -> Self { Self { wte, blocks, ln_f, lm_head, } } pub fn forward(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, seq_len) = x.shape().dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx)?; } let x = self.ln_f.forward(&x)?; let x = x.i((.., seq_len - 1, ..))?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cache: &Cache, cfg: &Config, comm: Rc<Comm>) -> Result<Self> { let wte = embedding(cfg, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; let norm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.num_hidden_layers) .map(|i| { Block::load( vb.pp(&format!("model.layers.{i}")), cache, cfg, comm.clone(), ) }) .collect::<Result<Vec<_>>>()?; Ok(Self::new(wte, blocks, norm, lm_head)) } }
candle/candle-examples/examples/llama_multiprocess/model.rs/0
{ "file_path": "candle/candle-examples/examples/llama_multiprocess/model.rs", "repo_id": "candle", "token_count": 7294 }
# candle-mimi [Mimi](https://huggingface.co/kyutai/mimi) is a state-of-the-art audio compression model using an encoder/decoder architecture with residual vector quantization. The candle implementation supports streaming, meaning that it's possible to encode or decode a stream of audio tokens on the fly to provide low-latency interaction with an audio model. ## Running one example Generate some audio tokens from an audio file. ```bash wget https://github.com/metavoiceio/metavoice-src/raw/main/assets/bria.mp3 cargo run --example mimi --features mimi --release -- audio-to-code bria.mp3 bria.safetensors ``` Then decode the audio tokens back into a sound file. ```bash cargo run --example mimi --features mimi --release -- code-to-audio bria.safetensors bria.wav ```
candle/candle-examples/examples/mimi/README.md/0
{ "file_path": "candle/candle-examples/examples/mimi/README.md", "repo_id": "candle", "token_count": 228 }
# candle-moondream [Moondream](https://github.com/vikhyat/moondream) is a computer-vision model that can answer real-world questions about images. It's tiny by today's standards, with only 1.6B parameters, which enables it to run on a variety of devices, including mobile phones and edge devices. ## Running some examples First download an example image: ```bash $ wget https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg ``` <img src="https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg" width="200"> Now you can run Moondream from the `candle-examples` crate: ```bash $ cargo run --example moondream --release -- --prompt "What is the girl eating?" --image "./demo-1.jpg" avavx: false, neon: true, simd128: false, f16c: false temp: 0.00 repeat-penalty: 1.00 repeat-last-n: 64 retrieved the files in 3.395583ms Running on CPU, to run on GPU(metal), build this example with `--features metal` loaded the model in 5.485493792s loaded and encoded the image Tensor[dims 3, 378, 378; f32] in 4.801396417s starting the inference loop The girl is eating a hamburger.< 9 tokens generated (0.68 token/s) ```
candle/candle-examples/examples/moondream/README.md/0
{ "file_path": "candle/candle-examples/examples/moondream/README.md", "repo_id": "candle", "token_count": 367 }
# candle-phi: 1.3b and 2.7b LLM with state of the art performance for <10b models. [Phi-1.5](https://huggingface.co/microsoft/phi-1_5), [Phi-2](https://huggingface.co/microsoft/phi-2), and [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) are language models using only 1.3, 2.7, and 3.8 billion parameters but with state of the art performance compared to models with up to 10 billion parameters. The candle implementation provides both the standard version as well as a quantized variant. ## Running some examples For the v2 version. ```bash $ cargo run --example phi --release -- --model 2 \ --prompt "A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom?" A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom? Solution: The potential energy of the skier is converted into kinetic energy as it slides down the slope. The formula for potential energy is mgh, where m is mass, g is acceleration due to gravity (9.8 m/s^2), and h is height. Since there's no friction, all the potential energy is converted into kinetic energy at the bottom of the slope. The formula for kinetic energy is 1/2mv^2, where v is velocity. We can equate these two formulas: mgh = 1/2mv^2 Solving for v, we get: v = sqrt(2gh) Substituting the given values, we get: v = sqrt(2*9.8*40) = 28 m/s Therefore, the skier speed at the bottom of the slope is 28 m/s. ``` For the v1.5 version. ```bash $ cargo run --example phi --release -- --prompt "def print_prime(n): " def print_prime(n): print("Printing prime numbers") for i in range(2, n+1): if is_prime(i): print(i) def is_prime(n): if n <= 1: return False for i in range(2, int(math.sqrt(n))+1): if n % i == 0: return False return True $ cargo run --example phi --release -- \ --prompt "Explain how to find the median of an array and write the corresponding python function.\nAnswer:" \ --quantized --sample-len 200 Explain how to find the median of an array and write the corresponding python function. Answer: The median is the middle value in an array. If the array has an even number of elements, the median is the average of the two middle values. def median(arr): arr.sort() n = len(arr) if n % 2 == 0: return (arr[n//2 - 1] + arr[n//2]) / 2 else: return arr[n//2] ``` This also supports the [Puffin Phi v2 model](https://huggingface.co/teknium/Puffin-Phi-v2) for human interaction. ``` $ cargo run --example phi --release -- \ --prompt "USER: What would you do on a sunny day in Paris?\nASSISTANT:" \ --sample-len 200 --model puffin-phi-v2 --quantized USER: What would you do on a sunny day in Paris? ASSISTANT: On a sunny day in Paris, you could visit the Musée du Louvre to admire the famous painting "Mona Lisa" by Leonardo da Vinci. You might also want to stroll along the Champs-Élysées and enjoy the beautiful architecture of the buildings around you. Don't forget to stop by a café for a cup of coffee and to soak up the sun!" ```
candle/candle-examples/examples/phi/README.md/0
{ "file_path": "candle/candle-examples/examples/phi/README.md", "repo_id": "candle", "token_count": 1048 }
# candle-reinforcement-learning Reinforcement Learning examples for candle. This has been tested with `gymnasium` version `0.29.1`. You can install the Python package with: ```bash pip install "gymnasium[accept-rom-license]" ``` In order to run the examples, use the following commands. Note the additional `--package` flag to ensure that there is no conflict with the `candle-pyo3` crate. For the Policy Gradient example: ```bash cargo run --example reinforcement-learning --features=pyo3 --package candle-examples -- pg ``` For the Deep Deterministic Policy Gradient example: ```bash cargo run --example reinforcement-learning --features=pyo3 --package candle-examples -- ddpg ```
candle/candle-examples/examples/reinforcement-learning/README.md/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/README.md", "repo_id": "candle", "token_count": 198 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use clap::{Parser, ValueEnum}; use candle_transformers::models::quantized_rwkv_v5::Model as Q5; use candle_transformers::models::quantized_rwkv_v6::Model as Q6; use candle_transformers::models::rwkv_v5::{Config, Model as M5, State, Tokenizer}; use candle_transformers::models::rwkv_v6::Model as M6; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; const EOS_TOKEN_ID: u32 = 261; enum Model { M5(M5), Q5(Q5), M6(M6), Q6(Q6), } impl Model { fn forward(&self, xs: &Tensor, state: &mut State) -> candle::Result<Tensor> { match self { Self::M5(m) => m.forward(xs, state), Self::Q5(m) => m.forward(xs, state), Self::M6(m) => m.forward(xs, state), Self::Q6(m) => m.forward(xs, state), } } } struct TextGeneration { model: Model, config: Config, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, config: Config, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, config, tokenizer, logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; let mut tokens = self.tokenizer.encode(prompt)?; let mut generated_tokens = 0usize; let mut state = State::new(1, &self.config, &self.device)?; let mut next_logits = None; for &t in tokens.iter() { let input = Tensor::new(&[[t]], &self.device)?; let logits = self.model.forward(&input, &mut state)?; next_logits = Some(logits); print!("{}", self.tokenizer.decode(&[t])?) } std::io::stdout().flush()?; let start_gen = std::time::Instant::now(); for _ in 0..sample_len { let logits = match next_logits.as_ref() { Some(logits) => logits, None => anyhow::bail!("cannot work on an empty prompt"), }; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == EOS_TOKEN_ID || next_token == 0 { break; } print!("{}", self.tokenizer.decode(&[next_token])?); std::io::stdout().flush()?; let input = Tensor::new(&[[next_token]], &self.device)?; next_logits = Some(self.model.forward(&input, &mut state)?) 
} let dt = start_gen.elapsed(); println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, ValueEnum, Clone, Copy, PartialEq, Eq, Debug)] enum Which { Eagle7b, World1b5, World3b, World6_1b6, } impl std::fmt::Display for Which { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl Which { fn model_id(&self) -> &'static str { match self { Self::Eagle7b => "RWKV/v5-Eagle-7B-HF", Self::World1b5 => "RWKV/rwkv-5-world-1b5", Self::World3b => "RWKV/rwkv-5-world-3b", Self::World6_1b6 => "paperfun/rwkv", } } fn revision(&self) -> &'static str { match self { Self::Eagle7b => "refs/pr/1", Self::World1b5 | Self::World3b => "refs/pr/2", Self::World6_1b6 => "main", } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long, default_value = "world1b5")] which: Which, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] config_file: Option<String>, #[arg(long)] quantized: bool, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id .unwrap_or_else(|| args.which.model_id().to_string()), RepoType::Model, args.revision .unwrap_or_else(|| args.which.revision().to_string()), )); let tokenizer = match args.tokenizer { Some(file) => std::path::PathBuf::from(file), None => api .model("lmz/candle-rwkv".to_string()) .get("rwkv_vocab_v20230424.json")?, }; let config_filename = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { if args.quantized { vec![match args.which { Which::World1b5 => api .model("lmz/candle-rwkv".to_string()) .get("world1b5-q4k.gguf")?, Which::World3b => api .model("lmz/candle-rwkv".to_string()) .get("world3b-q4k.gguf")?, Which::Eagle7b => api .model("lmz/candle-rwkv".to_string()) .get("eagle7b-q4k.gguf")?, Which::World6_1b6 => repo.get("rwkv-6-world-1b6-q4k.gguf")?, }] } else { vec![match args.which { Which::World1b5 | Which::World3b | Which::Eagle7b => { repo.get("model.safetensors")? } Which::World6_1b6 => repo.get("rwkv-6-world-1b6.safetensors")?, }] } } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::new(tokenizer)?; let start = std::time::Instant::now(); let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?; let device = candle_examples::device(args.cpu)?; let model = if args.quantized { let filename = &filenames[0]; let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(filename, &device)?; match args.which { Which::World1b5 | Which::World3b | Which::Eagle7b => Model::Q5(Q5::new(&config, vb)?), Which::World6_1b6 => Model::Q6(Q6::new(&config, vb)?), } } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? }; match args.which { Which::World1b5 | Which::World3b | Which::Eagle7b => Model::M5(M5::new(&config, vb)?), Which::World6_1b6 => Model::M6(M6::new(&config, vb)?), } }; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, config, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/rwkv/main.rs/0
{ "file_path": "candle/candle-examples/examples/rwkv/main.rs", "repo_id": "candle", "token_count": 5059 }
use symphonia::core::audio::{AudioBufferRef, Signal}; use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL}; use symphonia::core::conv::FromSample; fn conv<T>(samples: &mut Vec<f32>, data: std::borrow::Cow<symphonia::core::audio::AudioBuffer<T>>) where T: symphonia::core::sample::Sample, f32: symphonia::core::conv::FromSample<T>, { samples.extend(data.chan(0).iter().map(|v| f32::from_sample(*v))) } pub(crate) fn pcm_decode<P: AsRef<std::path::Path>>(path: P) -> anyhow::Result<(Vec<f32>, u32)> { // Open the media source. let src = std::fs::File::open(path)?; // Create the media source stream. let mss = symphonia::core::io::MediaSourceStream::new(Box::new(src), Default::default()); // Create a probe hint using the file's extension. [Optional] let hint = symphonia::core::probe::Hint::new(); // Use the default options for metadata and format readers. let meta_opts: symphonia::core::meta::MetadataOptions = Default::default(); let fmt_opts: symphonia::core::formats::FormatOptions = Default::default(); // Probe the media source. let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?; // Get the instantiated format reader. let mut format = probed.format; // Find the first audio track with a known (decodeable) codec. let track = format .tracks() .iter() .find(|t| t.codec_params.codec != CODEC_TYPE_NULL) .expect("no supported audio tracks"); // Use the default options for the decoder. let dec_opts: DecoderOptions = Default::default(); // Create a decoder for the track. let mut decoder = symphonia::default::get_codecs() .make(&track.codec_params, &dec_opts) .expect("unsupported codec"); let track_id = track.id; let sample_rate = track.codec_params.sample_rate.unwrap_or(0); let mut pcm_data = Vec::new(); // The decode loop. while let Ok(packet) = format.next_packet() { // Consume any new metadata that has been read since the last packet. while !format.metadata().is_latest() { format.metadata().pop(); } // If the packet does not belong to the selected track, skip over it. if packet.track_id() != track_id { continue; } match decoder.decode(&packet)? { AudioBufferRef::F32(buf) => pcm_data.extend(buf.chan(0)), AudioBufferRef::U8(data) => conv(&mut pcm_data, data), AudioBufferRef::U16(data) => conv(&mut pcm_data, data), AudioBufferRef::U24(data) => conv(&mut pcm_data, data), AudioBufferRef::U32(data) => conv(&mut pcm_data, data), AudioBufferRef::S8(data) => conv(&mut pcm_data, data), AudioBufferRef::S16(data) => conv(&mut pcm_data, data), AudioBufferRef::S24(data) => conv(&mut pcm_data, data), AudioBufferRef::S32(data) => conv(&mut pcm_data, data), AudioBufferRef::F64(data) => conv(&mut pcm_data, data), } } Ok((pcm_data, sample_rate)) }
candle/candle-examples/examples/whisper/pcm_decode.rs/0
{ "file_path": "candle/candle-examples/examples/whisper/pcm_decode.rs", "repo_id": "candle", "token_count": 1267 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; mod model; use model::{Multiples, YoloV8, YoloV8Pose}; use candle::{DType, Device, IndexOp, Result, Tensor}; use candle_nn::{Module, VarBuilder}; use candle_transformers::object_detection::{non_maximum_suppression, Bbox, KeyPoint}; use clap::{Parser, ValueEnum}; use image::DynamicImage; // Keypoints as reported by ChatGPT :) // Nose // Left Eye // Right Eye // Left Ear // Right Ear // Left Shoulder // Right Shoulder // Left Elbow // Right Elbow // Left Wrist // Right Wrist // Left Hip // Right Hip // Left Knee // Right Knee // Left Ankle // Right Ankle const KP_CONNECTIONS: [(usize, usize); 16] = [ (0, 1), (0, 2), (1, 3), (2, 4), (5, 6), (5, 11), (6, 12), (11, 12), (5, 7), (6, 8), (7, 9), (8, 10), (11, 13), (12, 14), (13, 15), (14, 16), ]; // Model architecture from https://github.com/ultralytics/ultralytics/issues/189 // https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py pub fn report_detect( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, legend_size: u32, ) -> Result<DynamicImage> { let pred = pred.to_device(&Device::Cpu)?; let (pred_size, npreds) = pred.dims2()?; let nclasses = pred_size - 4; // The bounding boxes grouped by (maximum) class index. let mut bboxes: Vec<Vec<Bbox<Vec<KeyPoint>>>> = (0..nclasses).map(|_| vec![]).collect(); // Extract the bounding boxes for which confidence is above the threshold. for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = *pred[4..].iter().max_by(|x, y| x.total_cmp(y)).unwrap(); if confidence > confidence_threshold { let mut class_index = 0; for i in 0..nclasses { if pred[4 + i] > pred[4 + class_index] { class_index = i } } if pred[class_index + 4] > 0. { let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, data: vec![], }; bboxes[class_index].push(bbox) } } } non_maximum_suppression(&mut bboxes, nms_threshold); // Annotate the original image and print boxes information. let (initial_h, initial_w) = (img.height(), img.width()); let w_ratio = initial_w as f32 / w as f32; let h_ratio = initial_h as f32 / h as f32; let mut img = img.to_rgb8(); let font = Vec::from(include_bytes!("roboto-mono-stripped.ttf") as &[u8]); let font = ab_glyph::FontRef::try_from_slice(&font).map_err(candle::Error::wrap)?; for (class_index, bboxes_for_class) in bboxes.iter().enumerate() { for b in bboxes_for_class.iter() { println!( "{}: {:?}", candle_examples::coco_classes::NAMES[class_index], b ); let xmin = (b.xmin * w_ratio) as i32; let ymin = (b.ymin * h_ratio) as i32; let dx = (b.xmax - b.xmin) * w_ratio; let dy = (b.ymax - b.ymin) * h_ratio; if dx >= 0. && dy >= 0. { imageproc::drawing::draw_hollow_rect_mut( &mut img, imageproc::rect::Rect::at(xmin, ymin).of_size(dx as u32, dy as u32), image::Rgb([255, 0, 0]), ); } if legend_size > 0 { imageproc::drawing::draw_filled_rect_mut( &mut img, imageproc::rect::Rect::at(xmin, ymin).of_size(dx as u32, legend_size), image::Rgb([170, 0, 0]), ); let legend = format!( "{} {:.0}%", candle_examples::coco_classes::NAMES[class_index], 100. 
* b.confidence ); imageproc::drawing::draw_text_mut( &mut img, image::Rgb([255, 255, 255]), xmin, ymin, ab_glyph::PxScale { x: legend_size as f32 - 1., y: legend_size as f32 - 1., }, &font, &legend, ) } } } Ok(DynamicImage::ImageRgb8(img)) } pub fn report_pose( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, ) -> Result<DynamicImage> { let pred = pred.to_device(&Device::Cpu)?; let (pred_size, npreds) = pred.dims2()?; if pred_size != 17 * 3 + 4 + 1 { candle::bail!("unexpected pred-size {pred_size}"); } let mut bboxes = vec![]; // Extract the bounding boxes for which confidence is above the threshold. for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = pred[4]; if confidence > confidence_threshold { let keypoints = (0..17) .map(|i| KeyPoint { x: pred[3 * i + 5], y: pred[3 * i + 6], mask: pred[3 * i + 7], }) .collect::<Vec<_>>(); let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, data: keypoints, }; bboxes.push(bbox) } } let mut bboxes = vec![bboxes]; non_maximum_suppression(&mut bboxes, nms_threshold); let bboxes = &bboxes[0]; // Annotate the original image and print boxes information. let (initial_h, initial_w) = (img.height(), img.width()); let w_ratio = initial_w as f32 / w as f32; let h_ratio = initial_h as f32 / h as f32; let mut img = img.to_rgb8(); for b in bboxes.iter() { println!("{b:?}"); let xmin = (b.xmin * w_ratio) as i32; let ymin = (b.ymin * h_ratio) as i32; let dx = (b.xmax - b.xmin) * w_ratio; let dy = (b.ymax - b.ymin) * h_ratio; if dx >= 0. && dy >= 0. { imageproc::drawing::draw_hollow_rect_mut( &mut img, imageproc::rect::Rect::at(xmin, ymin).of_size(dx as u32, dy as u32), image::Rgb([255, 0, 0]), ); } for kp in b.data.iter() { if kp.mask < 0.6 { continue; } let x = (kp.x * w_ratio) as i32; let y = (kp.y * h_ratio) as i32; imageproc::drawing::draw_filled_circle_mut( &mut img, (x, y), 2, image::Rgb([0, 255, 0]), ); } for &(idx1, idx2) in KP_CONNECTIONS.iter() { let kp1 = &b.data[idx1]; let kp2 = &b.data[idx2]; if kp1.mask < 0.6 || kp2.mask < 0.6 { continue; } imageproc::drawing::draw_line_segment_mut( &mut img, (kp1.x * w_ratio, kp1.y * h_ratio), (kp2.x * w_ratio, kp2.y * h_ratio), image::Rgb([255, 255, 0]), ); } } Ok(DynamicImage::ImageRgb8(img)) } #[derive(Clone, Copy, ValueEnum, Debug)] enum Which { N, S, M, L, X, } #[derive(Clone, Copy, ValueEnum, Debug)] enum YoloTask { Detect, Pose, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Model weights, in safetensors format. #[arg(long)] model: Option<String>, /// Which model variant to use. #[arg(long, value_enum, default_value_t = Which::S)] which: Which, images: Vec<String>, /// Threshold for the model confidence level. #[arg(long, default_value_t = 0.25)] confidence_threshold: f32, /// Threshold for non-maximum suppression. #[arg(long, default_value_t = 0.45)] nms_threshold: f32, /// The task to be run. #[arg(long, default_value = "detect")] task: YoloTask, /// The size for the legend, 0 means no legend. 
#[arg(long, default_value_t = 14)] legend_size: u32, } impl Args { fn model(&self) -> anyhow::Result<std::path::PathBuf> { let path = match &self.model { Some(model) => std::path::PathBuf::from(model), None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-yolo-v8".to_string()); let size = match self.which { Which::N => "n", Which::S => "s", Which::M => "m", Which::L => "l", Which::X => "x", }; let task = match self.task { YoloTask::Pose => "-pose", YoloTask::Detect => "", }; api.get(&format!("yolov8{size}{task}.safetensors"))? } }; Ok(path) } } pub trait Task: Module + Sized { fn load(vb: VarBuilder, multiples: Multiples) -> Result<Self>; fn report( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, legend_size: u32, ) -> Result<DynamicImage>; } impl Task for YoloV8 { fn load(vb: VarBuilder, multiples: Multiples) -> Result<Self> { YoloV8::load(vb, multiples, /* num_classes=*/ 80) } fn report( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, legend_size: u32, ) -> Result<DynamicImage> { report_detect( pred, img, w, h, confidence_threshold, nms_threshold, legend_size, ) } } impl Task for YoloV8Pose { fn load(vb: VarBuilder, multiples: Multiples) -> Result<Self> { YoloV8Pose::load(vb, multiples, /* num_classes=*/ 1, (17, 3)) } fn report( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, _legend_size: u32, ) -> Result<DynamicImage> { report_pose(pred, img, w, h, confidence_threshold, nms_threshold) } } pub fn run<T: Task>(args: Args) -> anyhow::Result<()> { let device = candle_examples::device(args.cpu)?; // Create the model and load the weights from the file. let multiples = match args.which { Which::N => Multiples::n(), Which::S => Multiples::s(), Which::M => Multiples::m(), Which::L => Multiples::l(), Which::X => Multiples::x(), }; let model = args.model()?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let model = T::load(vb, multiples)?; println!("model loaded"); for image_name in args.images.iter() { println!("processing {image_name}"); let mut image_name = std::path::PathBuf::from(image_name); let original_image = image::ImageReader::open(&image_name)? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. (w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &device, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = model.forward(&image_t)?.squeeze(0)?; println!("generated predictions {predictions:?}"); let image_t = T::report( &predictions, original_image, width, height, args.confidence_threshold, args.nms_threshold, args.legend_size, )?; image_name.set_extension("pp.jpg"); println!("writing {image_name:?}"); image_t.save(image_name)? 
} Ok(()) } pub fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; match args.task { YoloTask::Detect => run::<YoloV8>(args)?, YoloTask::Pose => run::<YoloV8Pose>(args)?, } Ok(()) }
candle/candle-examples/examples/yolo-v8/main.rs/0
{ "file_path": "candle/candle-examples/examples/yolo-v8/main.rs", "repo_id": "candle", "token_count": 7410 }
use core::ffi::{c_int, c_void}; extern "C" { pub(crate) fn run_mha( q_ptr: *const c_void, k_ptr: *const c_void, v_ptr: *const c_void, o_ptr: *const c_void, softmax_lse_ptr: *const c_void, alibi_slopes_ptr: *const c_void, cu_seqlens_q_ptr: *const i32, cu_seqlens_k_ptr: *const i32, q_batch_stride: u32, k_batch_stride: u32, v_batch_stride: u32, o_batch_stride: u32, alibi_slopes_batch_stride: u32, q_row_stride: u32, k_row_stride: u32, v_row_stride: u32, o_row_stride: u32, q_head_stride: u32, k_head_stride: u32, v_head_stride: u32, o_head_stride: u32, b: u32, h: u32, h_k: u32, d: u32, d_rounded: u32, softmax_scale: f32, seqlen_q: u32, seqlen_k: u32, seqlen_q_rounded: u32, seqlen_k_rounded: u32, is_bf16: c_int, is_causal: c_int, unpadded_lse: c_int, window_size_left: c_int, window_size_right: c_int, softcap: f32, ); }
candle/candle-flash-attn/src/ffi.rs/0
{ "file_path": "candle/candle-flash-attn/src/ffi.rs", "repo_id": "candle", "token_count": 702 }
// Kernels adapted from llama.cpp ggml-cuda.cu // https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda.cu #include "cuda_fp16.h" #include "cuda_bf16.h" #include<stdint.h> #define GGML_UNUSED(x) (void)(x) #define GGML_CUDA_ASSUME(x) #ifdef GGML_QKK_64 #define QK_K 64 #define K_SCALE_SIZE 4 #else #define QK_K 256 #define K_SCALE_SIZE 12 #endif #undef GGML_CUDA_F16 #define GGML_CUDA_DMMV_X 32 #define CUDA_QUANTIZE_BLOCK_SIZE 256 #define CUDA_DEQUANTIZE_BLOCK_SIZE 256 #define K_QUANTS_PER_ITERATION 2 typedef uint16_t ggml_fp16_t; typedef float dfloat; // dequantize float typedef float2 dfloat2; typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v); static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { x += __shfl_xor_sync(0xffffffff, x, mask, 32); } return x; } static __device__ __forceinline__ float warp_reduce_max(float x) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32)); } return x; } static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) { const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment int x32 = 0; x32 |= x16[0] << 0; x32 |= x16[1] << 16; return x32; } static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) { const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment int x32 = 0; x32 |= x16[0] << 0; x32 |= x16[1] << 16; return x32; } static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) { return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment } static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) { return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment } #define WARP_SIZE 32 #define CUDART_HMAX 11070 // CUDA 11.7, min. ver. 
for which __hmax and __hmax2 are known to work (may be higher than needed) #define CC_PASCAL 600 #define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products #define CC_VOLTA 700 #define CC_OFFSET_AMD 1000000 #define CC_RDNA1 (CC_OFFSET_AMD + 1010) #define CC_RDNA2 (CC_OFFSET_AMD + 1030) #define CC_RDNA3 (CC_OFFSET_AMD + 1100) static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, int c) { #if __CUDA_ARCH__ >= MIN_CC_DP4A return __dp4a(a, b, c); #else // __CUDA_ARCH__ >= MIN_CC_DP4A const int8_t * a8 = (const int8_t *) &a; const int8_t * b8 = (const int8_t *) &b; return c + a8[0]*b8[0] + a8[1]*b8[1] + a8[2]*b8[2] + a8[3]*b8[3]; #endif // __CUDA_ARCH__ >= MIN_CC_DP4A } #define MMQ_X_Q4_0_RDNA2 64 #define MMQ_Y_Q4_0_RDNA2 128 #define NWARPS_Q4_0_RDNA2 8 #define MMQ_X_Q4_0_RDNA1 64 #define MMQ_Y_Q4_0_RDNA1 64 #define NWARPS_Q4_0_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q4_0_AMPERE 4 #define MMQ_Y_Q4_0_AMPERE 32 #define NWARPS_Q4_0_AMPERE 4 #else #define MMQ_X_Q4_0_AMPERE 64 #define MMQ_Y_Q4_0_AMPERE 128 #define NWARPS_Q4_0_AMPERE 4 #endif #define MMQ_X_Q4_0_PASCAL 64 #define MMQ_Y_Q4_0_PASCAL 64 #define NWARPS_Q4_0_PASCAL 8 #define MMQ_X_Q4_1_RDNA2 64 #define MMQ_Y_Q4_1_RDNA2 128 #define NWARPS_Q4_1_RDNA2 8 #define MMQ_X_Q4_1_RDNA1 64 #define MMQ_Y_Q4_1_RDNA1 64 #define NWARPS_Q4_1_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q4_1_AMPERE 4 #define MMQ_Y_Q4_1_AMPERE 32 #define NWARPS_Q4_1_AMPERE 4 #else #define MMQ_X_Q4_1_AMPERE 64 #define MMQ_Y_Q4_1_AMPERE 128 #define NWARPS_Q4_1_AMPERE 4 #endif #define MMQ_X_Q4_1_PASCAL 64 #define MMQ_Y_Q4_1_PASCAL 64 #define NWARPS_Q4_1_PASCAL 8 #define MMQ_X_Q5_0_RDNA2 64 #define MMQ_Y_Q5_0_RDNA2 128 #define NWARPS_Q5_0_RDNA2 8 #define MMQ_X_Q5_0_RDNA1 64 #define MMQ_Y_Q5_0_RDNA1 64 #define NWARPS_Q5_0_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q5_0_AMPERE 4 #define MMQ_Y_Q5_0_AMPERE 32 #define NWARPS_Q5_0_AMPERE 4 #else #define MMQ_X_Q5_0_AMPERE 128 #define MMQ_Y_Q5_0_AMPERE 64 #define NWARPS_Q5_0_AMPERE 4 #endif #define MMQ_X_Q5_0_PASCAL 64 #define MMQ_Y_Q5_0_PASCAL 64 #define NWARPS_Q5_0_PASCAL 8 #define MMQ_X_Q5_1_RDNA2 64 #define MMQ_Y_Q5_1_RDNA2 128 #define NWARPS_Q5_1_RDNA2 8 #define MMQ_X_Q5_1_RDNA1 64 #define MMQ_Y_Q5_1_RDNA1 64 #define NWARPS_Q5_1_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q5_1_AMPERE 4 #define MMQ_Y_Q5_1_AMPERE 32 #define NWARPS_Q5_1_AMPERE 4 #else #define MMQ_X_Q5_1_AMPERE 128 #define MMQ_Y_Q5_1_AMPERE 64 #define NWARPS_Q5_1_AMPERE 4 #endif #define MMQ_X_Q5_1_PASCAL 64 #define MMQ_Y_Q5_1_PASCAL 64 #define NWARPS_Q5_1_PASCAL 8 #define MMQ_X_Q8_0_RDNA2 64 #define MMQ_Y_Q8_0_RDNA2 128 #define NWARPS_Q8_0_RDNA2 8 #define MMQ_X_Q8_0_RDNA1 64 #define MMQ_Y_Q8_0_RDNA1 64 #define NWARPS_Q8_0_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q8_0_AMPERE 4 #define MMQ_Y_Q8_0_AMPERE 32 #define NWARPS_Q8_0_AMPERE 4 #else #define MMQ_X_Q8_0_AMPERE 128 #define MMQ_Y_Q8_0_AMPERE 64 #define NWARPS_Q8_0_AMPERE 4 #endif #define MMQ_X_Q8_0_PASCAL 64 #define MMQ_Y_Q8_0_PASCAL 64 #define NWARPS_Q8_0_PASCAL 8 #define MMQ_X_Q2_K_RDNA2 64 #define MMQ_Y_Q2_K_RDNA2 128 #define NWARPS_Q2_K_RDNA2 8 #define MMQ_X_Q2_K_RDNA1 128 #define MMQ_Y_Q2_K_RDNA1 32 #define NWARPS_Q2_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q2_K_AMPERE 4 #define MMQ_Y_Q2_K_AMPERE 32 #define NWARPS_Q2_K_AMPERE 4 #else #define MMQ_X_Q2_K_AMPERE 64 #define MMQ_Y_Q2_K_AMPERE 128 #define NWARPS_Q2_K_AMPERE 4 #endif #define MMQ_X_Q2_K_PASCAL 
64 #define MMQ_Y_Q2_K_PASCAL 64 #define NWARPS_Q2_K_PASCAL 8 #define MMQ_X_Q3_K_RDNA2 128 #define MMQ_Y_Q3_K_RDNA2 64 #define NWARPS_Q3_K_RDNA2 8 #define MMQ_X_Q3_K_RDNA1 32 #define MMQ_Y_Q3_K_RDNA1 128 #define NWARPS_Q3_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q3_K_AMPERE 4 #define MMQ_Y_Q3_K_AMPERE 32 #define NWARPS_Q3_K_AMPERE 4 #else #define MMQ_X_Q3_K_AMPERE 128 #define MMQ_Y_Q3_K_AMPERE 128 #define NWARPS_Q3_K_AMPERE 4 #endif #define MMQ_X_Q3_K_PASCAL 64 #define MMQ_Y_Q3_K_PASCAL 64 #define NWARPS_Q3_K_PASCAL 8 #define MMQ_X_Q4_K_RDNA2 64 #define MMQ_Y_Q4_K_RDNA2 128 #define NWARPS_Q4_K_RDNA2 8 #define MMQ_X_Q4_K_RDNA1 32 #define MMQ_Y_Q4_K_RDNA1 64 #define NWARPS_Q4_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q4_K_AMPERE 4 #define MMQ_Y_Q4_K_AMPERE 32 #define NWARPS_Q4_K_AMPERE 4 #else #define MMQ_X_Q4_K_AMPERE 64 #define MMQ_Y_Q4_K_AMPERE 128 #define NWARPS_Q4_K_AMPERE 4 #endif #define MMQ_X_Q4_K_PASCAL 64 #define MMQ_Y_Q4_K_PASCAL 64 #define NWARPS_Q4_K_PASCAL 8 #define MMQ_X_Q5_K_RDNA2 64 #define MMQ_Y_Q5_K_RDNA2 128 #define NWARPS_Q5_K_RDNA2 8 #define MMQ_X_Q5_K_RDNA1 32 #define MMQ_Y_Q5_K_RDNA1 64 #define NWARPS_Q5_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q5_K_AMPERE 4 #define MMQ_Y_Q5_K_AMPERE 32 #define NWARPS_Q5_K_AMPERE 4 #else #define MMQ_X_Q5_K_AMPERE 64 #define MMQ_Y_Q5_K_AMPERE 128 #define NWARPS_Q5_K_AMPERE 4 #endif #define MMQ_X_Q5_K_PASCAL 64 #define MMQ_Y_Q5_K_PASCAL 64 #define NWARPS_Q5_K_PASCAL 8 #define MMQ_X_Q6_K_RDNA2 64 #define MMQ_Y_Q6_K_RDNA2 128 #define NWARPS_Q6_K_RDNA2 8 #define MMQ_X_Q6_K_RDNA1 32 #define MMQ_Y_Q6_K_RDNA1 64 #define NWARPS_Q6_K_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q6_K_AMPERE 4 #define MMQ_Y_Q6_K_AMPERE 32 #define NWARPS_Q6_K_AMPERE 4 #else #define MMQ_X_Q6_K_AMPERE 64 #define MMQ_Y_Q6_K_AMPERE 64 #define NWARPS_Q6_K_AMPERE 4 #endif #define MMQ_X_Q6_K_PASCAL 64 #define MMQ_Y_Q6_K_PASCAL 64 #define NWARPS_Q6_K_PASCAL 8 // QK = number of values after dequantization // QR = QK / number of values before dequantization // QI = number of 32 bit integers before dequantization #define QK4_0 32 #define QR4_0 2 #define QI4_0 (QK4_0 / (4 * QR4_0)) typedef struct { half d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); #define QK4_1 32 #define QR4_1 2 #define QI4_1 (QK4_1 / (4 * QR4_1)) typedef struct { half2 dm; // dm.x = delta, dm.y = min uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding"); #define QK5_0 32 #define QR5_0 2 #define QI5_0 (QK5_0 / (4 * QR5_0)) typedef struct { half d; // delta uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_0 / 2]; // nibbles / quants } block_q5_0; static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); #define QK5_1 32 #define QR5_1 2 #define QI5_1 (QK5_1 / (4 * QR5_1)) typedef struct { half2 dm; // dm.x = delta, dm.y = min uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); #define QK8_0 32 #define QR8_0 1 #define QI8_0 (QK8_0 / (4 * QR8_0)) typedef struct { half d; // delta int8_t qs[QK8_0]; // quants } block_q8_0; static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, 
"wrong q8_0 block size/padding"); #define QK8_1 32 #define QR8_1 1 #define QI8_1 (QK8_1 / (4 * QR8_1)) typedef struct { half2 ds; // ds.x = delta, ds.y = sum int8_t qs[QK8_0]; // quants } block_q8_1; static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding"); typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs); typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc); typedef void (*load_tiles_cuda_t)( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row); typedef float (*vec_dot_q_mul_mat_cuda_t)( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k); #define QR2_K 4 #define QI2_K (QK_K / (4*QR2_K)) typedef struct { uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits uint8_t qs[QK_K/4]; // quants half2 dm; // super-block scale for quantized scales/mins } block_q2_K; static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); #define QR3_K 4 #define QI3_K (QK_K / (4*QR3_K)) typedef struct { uint8_t hmask[QK_K/8]; // quants - high bit uint8_t qs[QK_K/4]; // quants - low 2 bits #ifdef GGML_QKK_64 uint8_t scales[2]; // scales, quantized with 8 bits #else uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits #endif half d; // super-block scale } block_q3_K; //static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + K_SCALE_SIZE, "wrong q3_K block size/padding"); #define QR4_K 2 #define QI4_K (QK_K / (4*QR4_K)) #ifdef GGML_QKK_64 typedef struct { half dm[2]; // super-block scales/mins uint8_t scales[2]; // 4-bit block scales/mins uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; static_assert(sizeof(block_q4_K) == sizeof(half2) + QK_K/2 + 2, "wrong q4_K block size/padding"); #else typedef struct { half2 dm; // super-block scale for quantized scales/mins uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding"); #endif #define QR5_K 2 #define QI5_K (QK_K / (4*QR5_K)) #ifdef GGML_QKK_64 typedef struct { half d; // super-block scale int8_t scales[QK_K/16]; // block scales uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding"); #else typedef struct { half2 dm; // super-block scale for quantized scales/mins uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding"); #endif #define QR6_K 2 #define QI6_K (QK_K / (4*QR6_K)) typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits uint8_t qh[QK_K/4]; // quants, upper 2 bits int8_t scales[QK_K/16]; // scales half d; // delta } block_q6_K; static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block 
size/padding"); // In llama.cpp this is only used for intermediate quantization and dot products typedef struct { float d; // delta int8_t qs[QK_K]; // quants int16_t bsums[QK_K/16]; // sum of quants in groups of 16 } block_q8_K; static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps, allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot> static __device__ __forceinline__ void mul_mat_q( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const block_q_t * x = (const block_q_t *) vx; const block_q8_1 * y = (const block_q8_1 *) vy; const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; const int blocks_per_warp = WARP_SIZE / qi; const int & ncols_dst = ncols_y; const int row_dst_0 = blockIdx.x*mmq_y; const int & row_x_0 = row_dst_0; const int col_dst_0 = blockIdx.y*mmq_x; const int & col_y_0 = col_dst_0; int * tile_x_ql = nullptr; half2 * tile_x_dm = nullptr; int * tile_x_qh = nullptr; int * tile_x_sc = nullptr; allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc); __shared__ int tile_y_qs[mmq_x * WARP_SIZE]; __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1]; float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}}; for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) { load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x); #pragma unroll for (int ir = 0; ir < qr; ++ir) { const int kqs = ir*WARP_SIZE + threadIdx.x; const int kbxd = kqs / QI8_1; #pragma unroll for (int i = 0; i < mmq_x; i += nwarps) { const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd]; const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE; tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1); } #pragma unroll for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) { const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x; const int kby = threadIdx.x % (WARP_SIZE/QI8_1); const int col_y_eff = min(col_y_0 + ids, ncols_y-1); // if the sum is not needed it's faster to transform the scale to f32 ahead of time const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds; half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby]; if (need_sum) { *dsi_dst = *dsi_src; } else { float * dfi_dst = (float *) dsi_dst; *dfi_dst = __low2half(*dsi_src); } } __syncthreads(); // #pragma unroll // unrolling this loop causes too much register pressure for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) { #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { #pragma unroll for (int i = 0; i < mmq_y; i += WARP_SIZE) { sum[i/WARP_SIZE][j/nwarps] += vec_dot( tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, threadIdx.x + i, threadIdx.y + j, k); } } } __syncthreads(); } } #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { const int col_dst = col_dst_0 + j + threadIdx.y; if (col_dst >= ncols_dst) { return; } #pragma unroll for (int i = 0; i < mmq_y; i += 
WARP_SIZE) { const int row_dst = row_dst_0 + threadIdx.x + i; if (row_dst >= nrows_dst) { continue; } dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps]; } } } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { (void)x_qh; (void)x_sc; const int kbx = k / QI4_0; const int kqsx = k % QI4_0; const block_q4_0 * bx0 = (const block_q4_0 *) vx; float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d; } const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) { int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d; } } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI4_1; const int kqsx = k % QI4_1; const block_q4_1 * bx0 = (const block_q4_1 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) { int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd; x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; } } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { (void)x_qh; (void)x_sc; __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1]; *x_ql = tile_x_qs; *x_dm = tile_x_dm; } static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q4_0 * x = (const block_q4_0 *) vx; const dfloat d = x[ib].d; const int vui = x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; #ifdef GGML_CUDA_F16 v = __hsub2(v, {8.0f, 8.0f}); v = 
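// ---------------------------------------------------------------------------
// Presumed behaviour of the get_int_from_uint8 / get_int_from_int8_aligned
// helpers used by the load_tiles_* functions above: read four consecutive
// quant bytes as one 32-bit lane so a single dp4a can consume them. Host
// equivalent for reference only (ref_ name is hypothetical):
#include <cstdint>
#include <cstring>

static inline int ref_get_int_from_bytes(const uint8_t * q, int i32) {
    int v;
    std::memcpy(&v, q + 4*i32, sizeof(v));  // byte offset = 4 * int index
    return v;
}
// ---------------------------------------------------------------------------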
__hmul2(v, {d, d}); #else v.x = (v.x - 8.0f) * d; v.y = (v.y - 8.0f) * d; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q4_1 * x = (const block_q4_1 *) vx; const dfloat d = __low2half(x[ib].dm); const dfloat m = __high2half(x[ib].dm); const int vui = x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); v = __hadd2(v, {m, m}); #else v.x = (v.x * d) + m; v.y = (v.y * d) + m; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q5_0 * x = (const block_q5_0 *) vx; const dfloat d = x[ib].d; uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_CUDA_F16 v = __hsub2(v, {16.0f, 16.0f}); v = __hmul2(v, {d, d}); #else v.x = (v.x - 16.0f) * d; v.y = (v.y - 16.0f) * d; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q5_1 * x = (const block_q5_1 *) vx; const dfloat d = __low2half(x[ib].dm); const dfloat m = __high2half(x[ib].dm); uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); v = __hadd2(v, {m, m}); #else v.x = (v.x * d) + m; v.y = (v.y * d) + m; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q8_0 * x = (const block_q8_0 *) vx; const dfloat d = x[ib].d; v.x = x[ib].qs[iqs + 0]; v.y = x[ib].qs[iqs + 1]; #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); #else v.x *= d; v.y *= d; #endif // GGML_CUDA_F16 } template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t> static __device__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) { const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x); if (i >= k) { return; } const int ib = i/qk; // block index const int iqs = (i%qk)/qr; // quant index const int iybs = i - i%qk; // y block start index const int y_offset = qr == 1 ? 
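// ---------------------------------------------------------------------------
// Host reference for a whole q5_0 block, mirroring dequantize_q5_0 above
// (ref_* names are hypothetical): 32 weights keep their low 4 bits in qs and
// their 5th bit packed into the 32-bit qh word; w = d * (q - 16).
#include <cstdint>
#include <cstring>

struct ref_block_q5_0 {
    float   d;       // fp16 on the GPU, float here for clarity
    uint8_t qh[4];   // 32 high bits
    uint8_t qs[16];  // 32 x 4 low bits
};

static void ref_dequantize_q5_0(const ref_block_q5_0 & b, float * y) {
    uint32_t qh;
    std::memcpy(&qh, b.qh, sizeof(qh));
    for (int j = 0; j < 16; ++j) {
        const int xh_0 = ((qh >> (j +  0)) << 4) & 0x10;  // 5th bit of value j
        const int xh_1 =  (qh >> (j + 12))       & 0x10;  // 5th bit of value j + 16
        y[j +  0] = b.d * (float)(((b.qs[j] & 0x0F) | xh_0) - 16);
        y[j + 16] = b.d * (float)(((b.qs[j] >>   4) | xh_1) - 16);
    }
}
// ---------------------------------------------------------------------------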
1 : qk/2; // dequantize dfloat2 v; dequantize_kernel(vx, ib, iqs, v); y[iybs + iqs + 0] = v.x; y[iybs + iqs + y_offset] = v.y; } template<typename dst_t> static __device__ void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int64_t i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_0 * x = (const block_q4_0 *)vx + ib; const float d = __half2float(x->d); const float dm = -8*d; const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d * (q[l] & 0xF) + dm; y[l+16] = d * (q[l] >> 4) + dm; } } template<typename dst_t> static __device__ void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int64_t i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int64_t ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 4*il; const block_q4_1 * x = (const block_q4_1 *)vx + ib; const float2 d = __half22float2(x->dm); const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d.x * (q[l] & 0xF) + d.y; y[l+16] = d.x * (q[l] >> 4) + d.y; } } //================================== k-quants template<typename dst_t> static __device__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int i = blockIdx.x; const block_q2_K * x = (const block_q2_K *) vx; const int tid = threadIdx.x; #if QK_K == 256 const int n = tid/32; const int l = tid - 32*n; const int is = 8*n + l/16; const uint8_t q = x[i].qs[32*n + l]; dst_t * y = yy + i*QK_K + 128*n; float dall = __low2half(x[i].dm); float dmin = __high2half(x[i].dm); y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4); y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4); y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4); #else const int is = tid/16; // 0 or 1 const int il = tid%16; // 0...15 const uint8_t q = x[i].qs[il] >> (2*is); dst_t * y = yy + i*QK_K + 16*is + il; float dall = __low2half(x[i].dm); float dmin = __high2half(x[i].dm); y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4); #endif } template<typename dst_t> static __device__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const int i = blockIdx.x; const block_q3_K * x = (const block_q3_K *) vx; #if QK_K == 256 const int r = threadIdx.x/4; const int tid = r/2; const int is0 = r%2; const int l0 = 16*is0 + 4*(threadIdx.x%4); const int n = tid / 4; const int j = tid - 4*n; uint8_t m = 1 << (4*n + j); int is = 8*n + 2*j + is0; int shift = 2*j; int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) : is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) : is < 12 ? 
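// ---------------------------------------------------------------------------
// Host reference for one q2_K super-block with QK_K == 256, matching the
// kernel above (ref_* names are hypothetical). Every 16 weights share one
// scale byte: low nibble = scale, high nibble = min, applied as
// w = d * (sc & 0xF) * q2 - dmin * (sc >> 4).
#include <cstdint>

struct ref_block_q2_K {
    uint8_t scales[16];  // 4-bit scale | 4-bit min per 16 weights
    uint8_t qs[64];      // 256 x 2-bit quants
    float   d, dmin;     // super-block scales (a half2 on the GPU)
};

static void ref_dequantize_q2_K(const ref_block_q2_K & b, float * y) {
    for (int o = 0; o < 256; ++o) {
        const int n      = o / 128;         // which 128-weight half
        const int plane  = (o % 128) / 32;  // which 2-bit plane of the byte
        const int l      = o % 32;          // byte inside the 32-byte group
        const int q2     = (b.qs[32*n + l] >> (2*plane)) & 3;
        const uint8_t sc = b.scales[8*n + 2*plane + l/16];
        y[o] = b.d * (float)(sc & 0xF) * (float)q2 - b.dmin * (float)(sc >> 4);
    }
}
// ---------------------------------------------------------------------------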
(x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) : (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4); float d_all = x[i].d; float dl = d_all * (us - 32); dst_t * y = yy + i*QK_K + 128*n + 32*j; const uint8_t * q = x[i].qs + 32*n; const uint8_t * hm = x[i].hmask; for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)); #else const int tid = threadIdx.x; const int is = tid/16; // 0 or 1 const int il = tid%16; // 0...15 const int im = il/8; // 0...1 const int in = il%8; // 0...7 dst_t * y = yy + i*QK_K + 16*is + il; const uint8_t q = x[i].qs[il] >> (2*is); const uint8_t h = x[i].hmask[in] >> (2*is + im); const float d = (float)x[i].d; if (is == 0) { y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } else { y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } #endif } #if QK_K == 256 static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) { if (j < 4) { d = q[j] & 63; m = q[j + 4] & 63; } else { d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); } } #endif template<typename dst_t> static __device__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q4_K * x = (const block_q4_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int is = 2*il; const int n = 4; dst_t * y = yy + i*QK_K + 64*il + n*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * q = x[i].qs + 32*il + n*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; for (int l = 0; l < n; ++l) { y[l + 0] = d1 * (q[l] & 0xF) - m1; y[l +32] = d2 * (q[l] >> 4) - m2; } #else const int tid = threadIdx.x; const uint8_t * q = x[i].qs; dst_t * y = yy + i*QK_K; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4); y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4); #endif } template<typename dst_t> static __device__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q5_K * x = (const block_q5_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int tid = threadIdx.x; const int il = tid/16; // il is in 0...3 const int ir = tid%16; // ir is in 0...15 const int is = 2*il; // is is in 0...6 dst_t * y = yy + i*QK_K + 64*il + 2*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * ql = x[i].qs + 32*il + 2*ir; const uint8_t * qh = x[i].qh + 2*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; uint8_t hm = 1 << (2*il); y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1; y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 
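// ---------------------------------------------------------------------------
// Host copy of get_scale_min_k4 above, for reference (ref_ name is
// hypothetical): the 12-byte scales array of q4_K/q5_K packs eight 6-bit
// scales and eight 6-bit mins; entries 4..7 keep their low 4 bits in bytes
// 8..11 and their top 2 bits in the high bits of bytes 0..7.
#include <cstdint>

static inline void ref_get_scale_min_k4(int j, const uint8_t * q,
                                        uint8_t & d, uint8_t & m) {
    if (j < 4) {
        d = q[j] & 63;
        m = q[j + 4] & 63;
    } else {
        d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        m = (q[j + 4] >>  4) | ((q[j    ] >> 6) << 4);
    }
}
// ---------------------------------------------------------------------------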
16 : 0)) - m1; hm <<= 1; y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2; y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2; #else const int tid = threadIdx.x; const uint8_t q = x[i].qs[tid]; const int im = tid/8; // 0...3 const int in = tid%8; // 0...7 const int is = tid/16; // 0 or 1 const uint8_t h = x[i].qh[in] >> im; const float d = x[i].d; dst_t * y = yy + i*QK_K + tid; y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16)); y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16)); #endif } template<typename dst_t> static __device__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q6_K * x = (const block_q6_K *) vx; const int64_t i = blockIdx.x; #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int64_t tid = threadIdx.x; const int64_t ip = tid/32; // ip is 0 or 1 const int64_t il = tid - 32*ip; // 0...32 const int64_t is = 8*ip + il/16; dst_t * y = yy + i*QK_K + 128*ip + il; const float d = x[i].d; const uint8_t * ql = x[i].ql + 64*ip + il; const uint8_t qh = x[i].qh[32*ip + il]; const int8_t * sc = x[i].scales + is; y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); #else // assume 32 threads const int64_t tid = threadIdx.x; const int64_t ip = tid/16; // 0 or 1 const int64_t il = tid - 16*ip; // 0...15 dst_t * y = yy + i*QK_K + 16*ip + il; const float d = x[i].d; const uint8_t ql = x[i].ql[16*ip + il]; const uint8_t qh = x[i].qh[il] >> (2*ip); const int8_t * sc = x[i].scales; y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32); #endif } template<typename dst_t> static __device__ void dequantize_block_q8_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { const int i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int ib = 8*i + ir; if (ib >= nb32) { return; } dst_t * y = yy + 256*i + 32*ir + 8*il; const block_q8_0 * x = (const block_q8_0 *)vx + ib; const float d = __half2float(x->d); const int8_t * q = x->qs + 8*il; for (int l = 0; l < 8; ++l) { y[l] = d * q[l]; } } template<typename dst_t> static __device__ void dequantize_block_q8_K(const void * __restrict__ vx, dst_t * __restrict__ yy) { const block_q8_K * x = (const block_q8_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int n = 8; dst_t * y = yy + i*QK_K + 64*il + n*ir; const int8_t * q = x[i].qs + 64*il + n*ir; for (int l = 0; l < n; ++l) { y[l] = q[l] * x[i].d; } #else const int tid = threadIdx.x; const uint8_t * q = x[i].qs; float * y = yy + i*QK_K; y[tid] = x[i].d * x[i].scales[0]; #endif } template<typename dst_t> static __device__ void dequantize_block_q5_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { return dequantize_block<QK5_0, QR5_0, dequantize_q5_0>(vx, yy, nb32); } template<typename dst_t> static __device__ void dequantize_block_q5_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { return dequantize_block<QK5_1, QR5_1, dequantize_q5_1>(vx, yy, nb32); } #define DEQUANTIZE_K(QNAME) \ extern "C" __global__ 
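// ---------------------------------------------------------------------------
// Host reference for one q6_K super-block with QK_K == 256, mirroring
// dequantize_block_q6_K above (ref_* names are hypothetical): each weight is
// 6 bits, low 4 from ql and high 2 from qh, centred by subtracting 32 and
// scaled by an 8-bit per-16-weight scale times the super-block scale d.
#include <cstdint>

struct ref_block_q6_K {
    uint8_t ql[128];     // low 4 bits
    uint8_t qh[64];      // high 2 bits
    int8_t  scales[16];  // per-16-weight scales
    float   d;           // super-block scale (fp16 on the GPU)
};

static void ref_dequantize_q6_K(const ref_block_q6_K & b, float * y) {
    const uint8_t * ql = b.ql;
    const uint8_t * qh = b.qh;
    const int8_t  * sc = b.scales;
    for (int n = 0; n < 256; n += 128, y += 128, ql += 64, qh += 32, sc += 8) {
        for (int l = 0; l < 32; ++l) {
            const int is = l / 16;
            const int q1 = (int8_t)((ql[l +  0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
            const int q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
            const int q3 = (int8_t)((ql[l +  0] >>  4) | (((qh[l] >> 4) & 3) << 4)) - 32;
            const int q4 = (int8_t)((ql[l + 32] >>  4) | (((qh[l] >> 6) & 3) << 4)) - 32;
            y[l +  0] = b.d * sc[is + 0] * q1;
            y[l + 32] = b.d * sc[is + 2] * q2;
            y[l + 64] = b.d * sc[is + 4] * q3;
            y[l + 96] = b.d * sc[is + 6] * q4;
        }
    }
}
// ---------------------------------------------------------------------------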
void dequantize_block_##QNAME##_f32(const void * __restrict__ vx, float * __restrict__ y) { \ dequantize_block_##QNAME(vx, y); \ } \ extern "C" __global__ void dequantize_block_##QNAME##_f16(const void * __restrict__ vx, half * __restrict__ y) { \ dequantize_block_##QNAME(vx, y); \ } \ #define DEQUANTIZE(QNAME) \ extern "C" __global__ void dequantize_block_##QNAME##_f32(const void * __restrict__ vx, float * __restrict__ y, const int k) { \ dequantize_block_##QNAME(vx, y, k); \ } \ extern "C" __global__ void dequantize_block_##QNAME##_f16(const void * __restrict__ vx, half * __restrict__ y, const int k) { \ dequantize_block_##QNAME(vx, y, k); \ } \ DEQUANTIZE_K(q2_K) DEQUANTIZE_K(q3_K) DEQUANTIZE_K(q4_K) DEQUANTIZE_K(q5_K) DEQUANTIZE_K(q6_K) DEQUANTIZE_K(q8_K) DEQUANTIZE(q4_0) DEQUANTIZE(q4_1) DEQUANTIZE(q5_0) DEQUANTIZE(q5_1) DEQUANTIZE(q8_0) template <int qk, int qr, dequantize_kernel_t dequantize_kernel> static __device__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) { // qk = quantized weights per x block // qr = number of quantized weights per data value in x block const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row >= nrows) { return; } const int tid = threadIdx.x; const int iter_stride = 2*GGML_CUDA_DMMV_X; const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter const int y_offset = qr == 1 ? 1 : qk/2; // partial sum for each thread #ifdef GGML_CUDA_F16 half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics #else float tmp = 0.0f; #endif // GGML_CUDA_F16 for (int i = 0; i < ncols; i += iter_stride) { const int col = i + vals_per_iter*tid; const int ib = (row*ncols + col)/qk; // x block index const int iqs = (col%qk)/qr; // x quant index const int iybs = col - col%qk; // y block start index // processing >2 values per i iter is faster for fast GPUs #pragma unroll for (int j = 0; j < vals_per_iter; j += 2) { // process 2 vals per j iter // dequantize // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val dfloat2 v; dequantize_kernel(vx, ib, iqs + j/qr, v); // matrix multiplication // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 #ifdef GGML_CUDA_F16 tmp += __hmul2(v, { y[iybs + iqs + j/qr + 0], y[iybs + iqs + j/qr + y_offset] }); #else tmp += v.x * y[iybs + iqs + j/qr + 0]; tmp += v.y * y[iybs + iqs + j/qr + y_offset]; #endif // GGML_CUDA_F16 } } // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { #ifdef GGML_CUDA_F16 dst[row] = tmp.x + tmp.y; #else dst[row] = tmp; #endif // GGML_CUDA_F16 } } extern "C" __global__ void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void 
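// ---------------------------------------------------------------------------
// The reduction idiom used at the end of the matrix-vector kernels above,
// pulled out as a stand-alone device helper for clarity (the name is made
// up): a butterfly sum over the 32 lanes of a warp via __shfl_xor_sync, after
// which every lane holds the warp total and lane 0 writes it to memory.
static __device__ __forceinline__ float ref_warp_reduce_sum(float v) {
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        v += __shfl_xor_sync(0xffffffff, v, mask, 32);  // exchange with lane ^ mask
    }
    return v;  // identical in all 32 lanes
}
// ---------------------------------------------------------------------------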
dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q2_K * x = (const block_q2_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0...15 or 0...7 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int s_offset = 8*im; const int y_offset = 128*im + l0; uint32_t aux[4]; const uint8_t * d = (const uint8_t *)aux; const uint8_t * m = (const uint8_t *)(aux + 2); for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset); aux[0] = a[0] & 0x0f0f0f0f; aux[1] = a[1] & 0x0f0f0f0f; aux[2] = (a[0] >> 4) & 0x0f0f0f0f; aux[3] = (a[1] >> 4) & 0x0f0f0f0f; float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3) + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3) + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3) + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3) + y[l+16] * d[1] * ((q[l+16] >> 0) & 3) + y[l+48] * d[3] * ((q[l+16] >> 2) & 3) + y[l+80] * d[5] * ((q[l+16] >> 4) & 3) +y[l+112] * d[7] * ((q[l+16] >> 6) & 3); sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6] + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7]; } tmp += dall * sum1 - dmin * sum2; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; uint32_t uaux[2]; const uint8_t * d = (const uint8_t *)uaux; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint32_t * s = (const uint32_t *)x[i].scales; uaux[0] = s[0] & 0x0f0f0f0f; uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; const float2 dall = __half22float2(x[i].dm); float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t ql = q[l]; sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3) + y[l+16] * d[1] * ((ql >> 2) & 3) + y[l+32] * d[2] * ((ql >> 4) & 3) + y[l+48] * d[3] * ((ql >> 6) & 3); sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7]; } tmp += dall.x * sum1 - dall.y * sum2; } #endif // sum up partial sums and write 
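// ---------------------------------------------------------------------------
// What the aux[] masking in dequantize_mul_mat_vec_q2_k above achieves, shown
// on the host (hypothetical ref_ name; assumes little-endian byte order, as
// on CUDA targets): eight packed q2_K scale bytes are split into eight 4-bit
// scales followed by eight 4-bit mins so they can be indexed as plain bytes.
#include <cstdint>
#include <cstring>

static void ref_split_q2_K_scales(const uint8_t scales8[8],
                                  uint8_t sc_out[8], uint8_t min_out[8]) {
    uint32_t a[2];
    std::memcpy(a, scales8, 8);
    uint32_t aux[4];
    aux[0] =  a[0]       & 0x0f0f0f0f;  // scales of groups 0..3
    aux[1] =  a[1]       & 0x0f0f0f0f;  // scales of groups 4..7
    aux[2] = (a[0] >> 4) & 0x0f0f0f0f;  // mins of groups 0..3
    aux[3] = (a[1] >> 4) & 0x0f0f0f0f;  // mins of groups 4..7
    std::memcpy(sc_out,  &aux[0], 8);
    std::memcpy(min_out, &aux[2], 8);
}
// ---------------------------------------------------------------------------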
back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q3_K * x = (const block_q3_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x0303; const uint16_t kmask2 = 0x0f0f; const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0....15 or 0...7 const uint8_t m = 1 << (4*im); const int l0 = n*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int y_offset = 128*im + l0; uint16_t utmp[4]; const int8_t * s = (const int8_t *)utmp; const uint16_t s_shift = 4*im; for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const uint8_t * h = x[i].hmask + l0; const uint16_t * a = (const uint16_t *)x[i].scales; utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4); utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4); utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4); utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4); const float d = x[i].d; float sum = 0; for (int l = 0; l < n; ++l) { sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4)) + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4)) + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4)) + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4)); sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4)) + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4)) + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4)) + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4)); } tmp += d * sum; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14 const int in = offset/8; // 0 or 1 const int im = offset%8; // 0...7 for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint8_t * s = x[i].scales; const float dall = (float)x[i].d; float sum = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t hl = x[i].hmask[im+l] >> in; const uint8_t ql = q[l]; sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4)) + y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4)) + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 
0 : 4)) + y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q4_K * x = (const block_q4_K *)vx + ib0; #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4 const int il = tid/step; // 0...3 const int ir = tid - step*il; // 0...7 or 0...3 const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4 const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; #if K_QUANTS_PER_ITERATION == 2 uint32_t q32[4]; const uint8_t * q4 = (const uint8_t *)q32; #else uint16_t q16[4]; const uint8_t * q4 = (const uint8_t *)q16; #endif float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); #if K_QUANTS_PER_ITERATION == 2 const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset); const uint32_t * q2 = q1 + 16; q32[0] = q1[0] & 0x0f0f0f0f; q32[1] = q1[0] & 0xf0f0f0f0; q32[2] = q2[0] & 0x0f0f0f0f; q32[3] = q2[0] & 0xf0f0f0f0; float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 4; ++l) { s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4]; s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; #else const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset); const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[0] & 0xf0f0; q16[2] = q2[0] & 0x0f0f; q16[3] = q2[0] & 0xf0f0; float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 2; ++l) { s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2]; s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; #endif } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; uint16_t aux16[2]; const uint8_t * s = (const uint8_t *)aux16; float tmp = 0; for (int i = ix; i < 
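// ---------------------------------------------------------------------------
// Why dequantize_mul_mat_vec_q4_k above can mask with 0xf0f0f0f0 instead of
// shifting: a high nibble left in place is exactly 16x its value, so the
// corresponding sub-block scale is simply multiplied by 1/16 (the
// "sc[1] * 1.f/16.f" factor). Tiny host illustration, hypothetical name:
#include <cstdint>

static inline float ref_high_nibble_dot(const uint8_t * q, const float * y,
                                        float sc, int n) {
    float acc = 0.0f;
    for (int l = 0; l < n; ++l) {
        acc += y[l] * (float)(q[l] & 0xF0);  // nibble kept in bits 4..7
    }
    return acc * sc * (1.0f / 16.0f);        // equals sc * sum(y * (q >> 4))
}
// ---------------------------------------------------------------------------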
num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const float * y = yy + i*QK_K + step; const uint16_t * a = (const uint16_t *)x[i].scales; aux16[0] = a[0] & 0x0f0f; aux16[1] = (a[0] >> 4) & 0x0f0f; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2]) + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2]) + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3]) + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) { const int row = blockIdx.x; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q5_K * x = (const block_q5_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = threadIdx.x/2; // 0...15 const int ix = threadIdx.x%2; const int il = tid/4; // 0...3 const int ir = tid - 4*il;// 0...3 const int n = 2; const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; const uint8_t hm1 = 1 << (2*im); const uint8_t hm2 = hm1 << 4; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; uint16_t q16[8]; const uint8_t * q4 = (const uint8_t *)q16; for (int i = ix; i < num_blocks_per_row; i += 2) { const uint8_t * ql1 = x[i].qs + q_offset; const uint8_t * qh = x[i].qh + l0; const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); float4 sum = {0.f, 0.f, 0.f, 0.f}; float smin = 0; const uint16_t * q1 = (const uint16_t *)ql1; const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[8] & 0x0f0f; q16[2] = (q1[0] >> 4) & 0x0f0f; q16[3] = (q1[8] >> 4) & 0x0f0f; q16[4] = q2[0] & 0x0f0f; q16[5] = q2[8] & 0x0f0f; q16[6] = (q2[0] >> 4) & 0x0f0f; q16[7] = (q2[8] >> 4) & 0x0f0f; for (int l = 0; l < n; ++l) { sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0)) + y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0)); sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0)) + y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0)); sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0)) + y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0)); sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0)) + y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 
16 : 0)); smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3] + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7]; } tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; const int im = step/8; const int in = step%8; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const int8_t * s = x[i].scales; const float * y = yy + i*QK_K + step; const float d = x[i].d; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { const uint8_t h = x[i].qh[in+j] >> im; sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16)) + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16)) + y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16)) + y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q6_K * x = (const block_q6_K *)vx + ib0; #if QK_K == 256 const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... 
const int in = tid - step*im; // 0...15 or 0...7 #if K_QUANTS_PER_ITERATION == 1 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 const int is = 0; #else const int l0 = 4 * in; // 0, 4, 8, ..., 28 const int is = in / 4; #endif const int ql_offset = 64*im + l0; const int qh_offset = 32*im + l0; const int s_offset = 8*im + is; const int y_offset = 128*im + l0; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * ql = x[i].ql + ql_offset; const uint8_t * qh = x[i].qh + qh_offset; const int8_t * s = x[i].scales + s_offset; const float d = x[i].d; #if K_QUANTS_PER_ITERATION == 1 float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32) + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32) + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32) + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32) + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32) + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32) + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32) +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32); tmp += sum; #else float sum = 0; for (int l = 0; l < 4; ++l) { sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32) + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32) + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32) + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32); } tmp += sum; #endif } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3 const int step = tid * K_QUANTS_PER_ITERATION; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + step; const uint8_t * ql = x[i].ql + step; const uint8_t * qh = x[i].qh + step; const int8_t * s = x[i].scales; const float d = x[i+0].d; float sum = 0; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32) + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32) + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32) + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { dst[row] = tmp; } } // VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called // MMVQ = mul_mat_vec_q, MMQ = mul_mat_q #define VDR_Q4_0_Q8_1_MMVQ 2 #define VDR_Q4_0_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( const int * v, const int * u, const float & d4, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); } const float2 ds8f = __half22float2(ds8); // second part 
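// ---------------------------------------------------------------------------
// Host emulation of the 4-way byte dot product (ggml_cuda_dp4a / __dp4a) that
// the vec_dot_*_impl functions below are built on: both 32-bit operands are
// treated as four signed 8-bit lanes, multiplied lane-wise and accumulated.
// In vec_dot_q4_0_q8_1_impl the nibbles are unsigned 0..15, so the final
// "- (8*vdr/QI4_0) * ds8f.y" term re-applies the implicit -8 offset of q4_0,
// split evenly across the calls that cover one block. Sketch only,
// hypothetical name:
#include <cstdint>

static inline int ref_dp4a(int a, int b, int c) {
    const uint32_t ua = (uint32_t) a;
    const uint32_t ub = (uint32_t) b;
    for (int k = 0; k < 4; ++k) {
        const int8_t ak = (int8_t)((ua >> (8*k)) & 0xFF);
        const int8_t bk = (int8_t)((ub >> (8*k)) & 0xFF);
        c += (int) ak * (int) bk;
    }
    return c;
}
// ---------------------------------------------------------------------------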
effectively subtracts 8 from each quant value return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y); } #define VDR_Q4_1_Q8_1_MMVQ 2 #define VDR_Q4_1_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl( const int * v, const int * u, const half2 & dm4, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); } #ifdef GGML_CUDA_F16 const float2 tmp = __half22float2(__hmul2(dm4, ds8)); const float d4d8 = tmp.x; const float m4s8 = tmp.y; #else const float2 dm4f = __half22float2(dm4); const float2 ds8f = __half22float2(ds8); const float d4d8 = dm4f.x * ds8f.x; const float m4s8 = dm4f.y * ds8f.y; #endif // GGML_CUDA_F16 // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1)); } #define VDR_Q5_0_Q8_1_MMVQ 2 #define VDR_Q5_0_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl( const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values } const float2 ds8f = __half22float2(ds8); // second part effectively subtracts 16 from each quant value return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y); } #define VDR_Q5_1_Q8_1_MMVQ 2 #define VDR_Q5_1_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl( const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4 vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12 vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20 vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28 sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4 vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12 vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20 vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28 sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values } #ifdef GGML_CUDA_F16 const float2 tmp = __half22float2(__hmul2(dm5, ds8)); const float d5d8 = tmp.x; const float m5s8 = tmp.y; #else const float2 dm5f = __half22float2(dm5); const float2 ds8f = __half22float2(ds8); const float d5d8 = dm5f.x * ds8f.x; const float m5s8 = dm5f.y * ds8f.y; #endif // GGML_CUDA_F16 // scale second part of sum by QI5_1 / vdr to 
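// ---------------------------------------------------------------------------
// The 5th-bit scatter used by vec_dot_q5_0_q8_1_impl / vec_dot_q5_1_q8_1_impl
// above, isolated for clarity (hypothetical name): vh carries one high bit
// per weight in its low bits, and the four shifted ORs drop those bits onto
// bit 4 of each byte lane, upgrading four 4-bit quants to 5-bit values in
// place.
static inline int ref_scatter_fifth_bits(int vl_word, int vh_word) {
    int v = vl_word & 0x0F0F0F0F;       // four 4-bit quants, one per byte lane
    v |= (vh_word <<  4) & 0x00000010;  // bit 0 -> bit 4  (lane 0)
    v |= (vh_word << 11) & 0x00001000;  // bit 1 -> bit 12 (lane 1)
    v |= (vh_word << 18) & 0x00100000;  // bit 2 -> bit 20 (lane 2)
    v |= (vh_word << 25) & 0x10000000;  // bit 3 -> bit 28 (lane 3)
    return v;
}
// ---------------------------------------------------------------------------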
compensate for multiple threads adding it return sumi*d5d8 + m5s8 / (QI5_1 / vdr); } #define VDR_Q8_0_Q8_1_MMVQ 2 #define VDR_Q8_0_Q8_1_MMQ 8 template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl( const int * v, const int * u, const float & d8_0, const float & d8_1) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(v[i], u[i], sumi); } return d8_0*d8_1 * sumi; } template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl( const int * v, const int * u, const half2 & dm8, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { // SIMD dot product of quantized values sumi = ggml_cuda_dp4a(v[i], u[i], sumi); } #ifdef GGML_CUDA_F16 const float2 tmp = __half22float2(__hmul2(dm8, ds8)); const float d8d8 = tmp.x; const float m8s8 = tmp.y; #else const float2 dm8f = __half22float2(dm8); const float2 ds8f = __half22float2(ds8); const float d8d8 = dm8f.x * ds8f.x; const float m8s8 = dm8f.y * ds8f.y; #endif // GGML_CUDA_F16 // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it return sumi*d8d8 + m8s8 / (QI8_1 / vdr); } #define VDR_Q2_K_Q8_1_MMVQ 1 #define VDR_Q2_K_Q8_1_MMQ 2 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR2_K; ++i) { const int sc = scales[2*i]; const int vi = (v >> (2*i)) & 0x03030303; sumf_d += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product // fill int with 4x m int m = sc >> 4; m |= m << 8; m |= m << 16; sumf_m += d8[i] * ggml_cuda_dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values } const float2 dm2f = __half22float2(dm2); return dm2f.x*sumf_d - dm2f.y*sumf_m; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float & d8) { int sumi_d = 0; int sumi_m = 0; #pragma unroll for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) { int sumi_d_sc = 0; const int sc = scales[i0 / (QI8_1/2)]; // fill int with 4x m int m = sc >> 4; m |= m << 8; m |= m << 16; #pragma unroll for (int i = i0; i < i0 + QI8_1/2; ++i) { sumi_d_sc = ggml_cuda_dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product sumi_m = ggml_cuda_dp4a(m, u[i], sumi_m); // multiply sum of q8_1 values with m } sumi_d += sumi_d_sc * (sc & 0xF); } const float2 dm2f = __half22float2(dm2); return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m); } #define VDR_Q3_K_Q8_1_MMVQ 1 #define VDR_Q3_K_Q8_1_MMQ 2 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales, const int & scale_offset, const float & d3, const float * __restrict__ d8) { float sumf = 0.0f; #pragma unroll for (int i = 0; i < QR3_K; ++i) { const int isc = scale_offset + 2*i; const int isc_low = isc % (QK_K/32); const int sc_shift_low = 4 * (isc / (QK_K/32)); const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF; const int isc_high = isc % (QK_K/64); const int sc_shift_high = 2 * (isc / (QK_K/64)); const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4; const int sc = (sc_low | sc_high) - 32; const int vil = 
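// ---------------------------------------------------------------------------
// The "fill int with 4x m" trick in vec_dot_q2_K_q8_1_impl_* above, as a
// stand-alone helper (hypothetical name): replicating a 4-bit min into every
// byte lane lets a single dp4a against four q8 quants compute
// m * (q8[0] + q8[1] + q8[2] + q8[3]), i.e. the constant part of the product.
static inline int ref_replicate_min_byte(int m4bit) {
    int m = m4bit & 0x0F;
    m |= m << 8;    // two copies
    m |= m << 16;   // four copies, e.g. 0x5 -> 0x05050505
    return m;
}
// ---------------------------------------------------------------------------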
(vl >> (2*i)) & 0x03030303; const int vih = ((vh >> i) << 2) & 0x04040404; const int vi = __vsubss4(vil, vih); sumf += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * sc); // SIMD dot product } return d3 * sumf; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d3, const float & d8) { int sumi = 0; #pragma unroll for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) { int sumi_sc = 0; for (int i = i0; i < i0 + QI8_1/2; ++i) { sumi_sc = ggml_cuda_dp4a(v[i], u[i], sumi_sc); // SIMD dot product } sumi += sumi_sc * scales[i0 / (QI8_1/2)]; } return d3*d8 * sumi; } #define VDR_Q4_K_Q8_1_MMVQ 2 #define VDR_Q4_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR4_K; ++i) { const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F; const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F; const int dot1 = ggml_cuda_dp4a(v1i, u[2*i+1], ggml_cuda_dp4a(v0i, u[2*i+0], 0)); // SIMD dot product const int dot2 = ggml_cuda_dp4a(0x01010101, u[2*i+1], ggml_cuda_dp4a(0x01010101, u[2*i+0], 0)); // sum of u sumf_d += d8[i] * (dot1 * sc[i]); sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) { int sumi_d = 0; #pragma unroll for (int j = 0; j < QI8_1; ++j) { sumi_d = ggml_cuda_dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product } const float2 ds8f = __half22float2(ds8[i]); sumf_d += ds8f.x * (sc[i] * sumi_d); sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } #define VDR_Q5_K_Q8_1_MMVQ 2 #define VDR_Q5_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR5_K; ++i) { const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F; const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F; const int vh0i = ((vh[0] >> i) << 4) & 0x10101010; const int vh1i = ((vh[1] >> i) << 4) & 0x10101010; const int v0i = vl0i | vh0i; const int v1i = vl1i | vh1i; const int dot1 = ggml_cuda_dp4a(v0i, u[2*i+0], ggml_cuda_dp4a(v1i, u[2*i+1], 0)); // SIMD dot product const int dot2 = ggml_cuda_dp4a(0x01010101, u[2*i+0], ggml_cuda_dp4a(0x01010101, u[2*i+1], 0)); // sum of u sumf_d += d8[i] * (dot1 * sc[i]); sumf_m += d8[i] * (dot2 * m[i]); } const float2 dm5f = __half22float2(dm5); return dm5f.x*sumf_d - dm5f.y*sumf_m; } // contiguous u/y values static __device__ __forceinline__ float 
vec_dot_q5_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { float sumf_d = 0.0f; float sumf_m = 0.0f; #pragma unroll for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) { int sumi_d = 0; #pragma unroll for (int j = 0; j < QI8_1; ++j) { sumi_d = ggml_cuda_dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product } const float2 ds8f = __half22float2(ds8[i]); sumf_d += ds8f.x * (sc[i] * sumi_d); sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val } const float2 dm4f = __half22float2(dm4); return dm4f.x*sumf_d - dm4f.y*sumf_m; } #define VDR_Q6_K_Q8_1_MMVQ 1 #define VDR_Q6_K_Q8_1_MMQ 8 // contiguous v/x values static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d, const float * __restrict__ d8) { float sumf = 0.0f; #pragma unroll for (int i = 0; i < QR6_K; ++i) { const int sc = scales[4*i]; const int vil = (vl >> (4*i)) & 0x0F0F0F0F; const int vih = ((vh >> (4*i)) << 4) & 0x30303030; const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32 sumf += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * sc); // SIMD dot product } return d*sumf; } // contiguous u/y values static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc, const float & d6, const float * __restrict__ d8) { float sumf_d = 0.0f; #pragma unroll for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) { int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale #pragma unroll for (int i = i0; i < i0 + 2; ++i) { sumi_d.x = ggml_cuda_dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product sumi_d.x = ggml_cuda_dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product sumi_d.y = ggml_cuda_dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product sumi_d.y = ggml_cuda_dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product } sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y); } return d6 * sumf_d; } static __device__ __forceinline__ float vec_dot_q4_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq; int v[VDR_Q4_0_Q8_1_MMVQ]; int u[2*VDR_Q4_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) { v[i] = get_int_from_uint8(bq4_0->qs, iqs + i); u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0); } return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q4_1_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq; int v[VDR_Q4_1_Q8_1_MMVQ]; int u[2*VDR_Q4_1_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) { v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i); u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1); } return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q5_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq; int 
vl[VDR_Q5_0_Q8_1_MMVQ]; int vh[VDR_Q5_0_Q8_1_MMVQ]; int u[2*VDR_Q5_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) { vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i); vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i)); u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0); } return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q5_1_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq; int vl[VDR_Q5_1_Q8_1_MMVQ]; int vh[VDR_Q5_1_Q8_1_MMVQ]; int u[2*VDR_Q5_1_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) { vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i); vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i)); u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1); } return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds); } static __device__ __forceinline__ float vec_dot_q8_0_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq; int v[VDR_Q8_0_Q8_1_MMVQ]; int u[VDR_Q8_0_Q8_1_MMVQ]; #pragma unroll for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) { v[i] = get_int_from_int8(bq8_0->qs, iqs + i); u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i); } return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds)); } static __device__ __forceinline__ float vec_dot_q2_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q2_K * bq2_K = (const block_q2_K *) vbq; const int bq8_offset = QR2_K * (iqs / QI8_1); const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); const uint8_t * scales = bq2_K->scales + scale_offset; const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs); int u[QR2_K]; float d8[QR2_K]; #pragma unroll for (int i = 0; i < QR2_K; ++ i) { u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + i].ds); } return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8); } static __device__ __forceinline__ float vec_dot_q3_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q3_K * bq3_K = (const block_q3_K *) vbq; const int bq8_offset = QR3_K * (iqs / (QI3_K/2)); const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2); const float d = bq3_K->d; const int vl = get_int_from_uint8(bq3_K->qs, iqs); // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset; int u[QR3_K]; float d8[QR3_K]; #pragma unroll for (int i = 0; i < QR3_K; ++i) { u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + i].ds); } return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8); } static __device__ __forceinline__ float vec_dot_q4_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { #ifndef GGML_QKK_64 const block_q4_K * bq4_K = (const block_q4_K *) vbq; int v[2]; int u[2*QR4_K]; float d8[QR4_K]; // iqs is in 0,2..30. 
bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6 const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2)); // iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12 // iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44 // iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76 // iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108 const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); v[0] = q4[0]; v[1] = q4[4]; const uint16_t * scales = (const uint16_t *)bq4_K->scales; uint16_t aux[2]; const int j = bq8_offset/2; if (j < 2) { aux[0] = scales[j+0] & 0x3f3f; aux[1] = scales[j+2] & 0x3f3f; } else { aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); } const uint8_t * sc = (const uint8_t *)aux; const uint8_t * m = sc + 2; for (int i = 0; i < QR4_K; ++i) { const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; d8[i] = __low2float(bq8i->ds); const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4); u[2*i+0] = q8[0]; u[2*i+1] = q8[4]; } return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8); #else const block_q4_K * bq4_K = (const block_q4_K *) vbq; float sumf_d = 0.0f; float sumf_m = 0.0f; uint16_t aux16[2]; const uint8_t * s = (const uint8_t *)aux16; const uint16_t * a = (const uint16_t *)bq4_K->scales; aux16[0] = a[0] & 0x0f0f; aux16[1] = (a[0] >> 4) & 0x0f0f; const float dall = bq4_K->dm[0]; const float dmin = bq4_K->dm[1]; const float d8_1 = __low2float(bq8_1[0].ds); const float d8_2 = __low2float(bq8_1[1].ds); const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); const int * q4 = (const int *)bq4_K->qs + (iqs/2); const int v1 = q4[0]; const int v2 = q4[4]; const int dot1 = ggml_cuda_dp4a(ui2, v2 & 0x0f0f0f0f, ggml_cuda_dp4a(ui1, v1 & 0x0f0f0f0f, 0)); const int dot2 = ggml_cuda_dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, ggml_cuda_dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0)); const int dot3 = ggml_cuda_dp4a(0x01010101, ui2, ggml_cuda_dp4a(0x01010101, ui1, 0)); const int dot4 = ggml_cuda_dp4a(0x01010101, ui4, ggml_cuda_dp4a(0x01010101, ui3, 0)); sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]); sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]); return dall * sumf_d - dmin * sumf_m; #endif } static __device__ __forceinline__ float vec_dot_q5_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { #ifndef GGML_QKK_64 const block_q5_K * bq5_K = (const block_q5_K *) vbq; int vl[2]; int vh[2]; int u[2*QR5_K]; float d8[QR5_K]; const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2)); const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4)); const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4)); vl[0] = ql[0]; vl[1] = ql[4]; vh[0] = qh[0] >> bq8_offset; vh[1] = qh[4] >> bq8_offset; const uint16_t * scales = (const uint16_t *)bq5_K->scales; uint16_t aux[2]; const int j = bq8_offset/2; if (j < 2) { aux[0] = scales[j+0] & 0x3f3f; aux[1] = scales[j+2] & 0x3f3f; } else { aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2); aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2); } const uint8_t * sc = (const uint8_t *)aux; const uint8_t * m = sc + 2; #pragma unroll for (int i = 0; i < QR5_K; ++i) { const block_q8_1 * bq8i = bq8_1 + bq8_offset + i; d8[i] = __low2float(bq8i->ds); const int * q8 = 
(const int *)bq8i->qs + ((iqs/2)%4); u[2*i+0] = q8[0]; u[2*i+1] = q8[4]; } return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8); #else const block_q5_K * bq5_K = (const block_q5_K *) vbq; const int8_t * s = bq5_K->scales; const float d = bq5_K->d; const float d8_1 = __low2half(bq8_1[0].ds); const float d8_2 = __low2half(bq8_1[1].ds); const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2)); const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4); const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2)); const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4); const int * ql = (const int *)bq5_K->qs + (iqs/2); const int vl1 = ql[0]; const int vl2 = ql[4]; const int step = 4 * (iqs/2); // 0, 4, 8, 12 const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6 const int in = step%8; // 0, 4, 0, 4 const int vh = (*((const int *)(bq5_K->qh + in))) >> im; const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f); const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f); const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f); const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f); const float sumf_d = d8_1 * (ggml_cuda_dp4a(ui1, v1, 0) * s[0] + ggml_cuda_dp4a(ui2, v2, 0) * s[1]) + d8_2 * (ggml_cuda_dp4a(ui3, v3, 0) * s[2] + ggml_cuda_dp4a(ui4, v4, 0) * s[3]); return d * sumf_d; #endif } static __device__ __forceinline__ float vec_dot_q6_K_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { const block_q6_K * bq6_K = (const block_q6_K *) vbq; const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4); const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8); const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4)); const int vl = get_int_from_uint8(bq6_K->ql, iqs); const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift; const int8_t * scales = bq6_K->scales + scale_offset; int u[QR6_K]; float d8[QR6_K]; #pragma unroll for (int i = 0; i < QR6_K; ++i) { u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1); d8[i] = __low2float(bq8_1[bq8_offset + 2*i].ds); } return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8); } // https://github.com/ggerganov/llama.cpp/blob/c50a82ce0f71558cbb8e555146ba124251504b38/ggml-cuda/mmvq.cu#L4 typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs); template <int ncols_y, int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda> static __device__ void mul_mat_vec_q( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3)) constexpr int nwarps = 1; constexpr int rows_per_cuda_block = 1; #else constexpr int nwarps = ncols_y <= 4 ? 4 : 2; constexpr int rows_per_cuda_block = ncols_y == 1 ? 
1 : 2; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && !defined(RDNA2) && !defined(RDNA3) const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; const int row0 = rows_per_cuda_block*blockIdx.x; const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; constexpr int blocks_per_iter = vdr * nwarps*WARP_SIZE / qi; // partial sum for each thread float tmp[ncols_y][rows_per_cuda_block] = {0.0f}; const block_q_t * x = (const block_q_t *) vx; const block_q8_1 * y = (const block_q8_1 *) vy; for (int kbx = tid / (qi/vdr); kbx < blocks_per_row_x; kbx += blocks_per_iter) { const int kby = kbx * (qk/QK8_1); // y block index that aligns with kbx // x block quant index when casting the quants to int const int kqs = vdr * (tid % (qi/vdr)); #pragma unroll for (int j = 0; j < ncols_y; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { tmp[j][i] += vec_dot_q_cuda( &x[kbx + (row0 + i)*blocks_per_row_x], &y[j*blocks_per_col_y + kby], kqs); } } } __shared__ float tmp_shared[nwarps-1 > 0 ? nwarps-1 : 1][ncols_y][rows_per_cuda_block][WARP_SIZE]; if (threadIdx.y > 0) { #pragma unroll for (int j = 0; j < ncols_y; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { tmp_shared[threadIdx.y-1][j][i][threadIdx.x] = tmp[j][i]; } } } __syncthreads(); if (threadIdx.y > 0) { return; } // sum up partial sums and write back result #pragma unroll for (int j = 0; j < ncols_y; ++j) { #pragma unroll for (int i = 0; i < rows_per_cuda_block; ++i) { #pragma unroll for (int l = 0; l < nwarps-1; ++l) { tmp[j][i] += tmp_shared[l][j][i][threadIdx.x]; } tmp[j][i] = warp_reduce_sum(tmp[j][i]); } if (threadIdx.x < rows_per_cuda_block) { dst[j*nrows_dst + row0 + threadIdx.x] = tmp[j][threadIdx.x]; } } } // batch size = 1 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, 
const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda1( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<1, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 2 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { 
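// Added commentary (not original code): the `cudaN` suffix of these extern "C"
// wrappers is the ncols_y template argument of mul_mat_vec_q, i.e. how many columns
// of the q8_1 activation matrix one launch handles (batch sizes 1..8). An extern "C"
// symbol cannot itself be a template, so one thin wrapper per (quant type, batch size)
// pair is emitted and the host side is expected to pick the kernel by name. A hedged
// host-side sketch, where `module` and the launch geometry are assumptions about the
// caller rather than code in this file:
//
//   // kernel = module.get_function("mul_mat_vec_q3_K_q8_1_cuda2");
//   // block = (WARP_SIZE, nwarps, 1); grid.x ~= nrows_x / rows_per_cuda_block;
//   // nwarps and rows_per_cuda_block must mirror the constexpr choices in mul_mat_vec_q.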
mul_mat_vec_q<2, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda2( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<2, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 3 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, 
QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda3( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<3, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 4 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI5_K, block_q5_K, 
VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda4( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<4, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 5 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda5( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<5, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, 
vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 6 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda6( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<6, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 7 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, 
vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda7( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<7, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } // batch size = 8 extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1> 
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda8( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) { mul_mat_vec_q<8, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1> (vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst); } extern "C" __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) { const int ix = blockDim.x*blockIdx.x + threadIdx.x; if (ix >= kx_padded) { return; } const int iy = blockDim.y*blockIdx.y + threadIdx.y; const int i_padded = iy*kx_padded + ix; block_q8_1 * y = (block_q8_1 *) vy; const int ib = i_padded / QK8_1; // block index const int iqs = i_padded % QK8_1; // quant index const float xi = ix < kx ? x[iy*kx + ix] : 0.0f; float amax = fabsf(xi); float sum = xi; amax = warp_reduce_max(amax); sum = warp_reduce_sum(sum); const float d = amax / 127; const int8_t q = amax == 0.0f ? 
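// Quantization step (added commentary only): d = amax/127 is the per-QK8_1-block
// scale, so |xi/d| <= 127 and the cast to int8_t cannot overflow; the amax == 0.0f
// test above falls back to q = 0 to avoid a division by zero.
// Illustrative numbers: amax = 2.54 -> d = 0.02, xi = -1.27 -> q = roundf(-63.5) = -64.
// The lane with iqs == 0 then stores d together with the block sum in y[ib].ds, which
// the q*_1 and K-quant dot products above read to fold in their constant/min terms.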
0 : roundf(xi / d); y[ib].qs[iqs] = q; if (iqs > 0) { return; } reinterpret_cast<half&>(y[ib].ds.x) = d; reinterpret_cast<half&>(y[ib].ds.y) = sum; } // Kernels from https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda/mmq.cu template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0]; *x_ql = tile_x_ql; *x_dm = (half2 *) tile_x_d; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI5_0; const int kqsx = k % QI5_0; const block_q5_0 * bx0 = (const block_q5_0 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx; const int ql = get_int_from_uint8(bxi->qs, kqsx); const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0)); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 qs0 = __vsubss4(qs0, 0x10101010); // subtract 16 x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; } const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) { int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d; } } static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0; const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; int u[2*VDR_Q5_0_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE]; } return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ> (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** 
x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI5_1; const int kqsx = k % QI5_1; const block_q5_1 * bx0 = (const block_q5_1 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx; const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1)); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 qs0 |= (qh << 11) & 0x00001000; // 1 -> 12 qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12 qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; } const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) { int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd; x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; } } static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); const int index_bx = i * (WARP_SIZE/QI5_1) + + i/QI5_1 + k/QI5_1; int u[2*VDR_Q5_1_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE]; } return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ> (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); 
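// Tile-layout note (added commentary): each load_tiles_* stages an mmq_y x WARP_SIZE
// slab of weight data in shared memory for the mul_mat_q kernels below. Every logical
// row is padded by one element -- the quants are written at x_ql[i * (WARP_SIZE + 1) + k]
// and the block scales at x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + kbxd] -- the usual
// trick to keep column-wise accesses from piling onto a single shared-memory bank.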
GGML_UNUSED(x_sc); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI8_0; const int kqsx = k % QI8_0; float * x_dmf = (float *) x_dm; const block_q8_0 * bx0 = (const block_q8_0 *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI8_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) { int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d; } } static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ> (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0], y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI2_K; const int kqsx = k % QI2_K; const block_q2_K * bx0 = (const block_q2_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI2_K; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) { int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd; x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); if (need_check) { i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4); x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); } } static __device__ __forceinline__ float 
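// Added commentary (not original code): the vec_dot_*_q8_1_mul_mat functions are the
// consumers of the tiles written by the matching load_tiles_* above -- x_ql/x_dm/x_sc
// hold the staged weight quants, scales and block constants, y_qs/y_ds hold the q8_1
// activations, and (i, j, k) select the weight row, activation column and position
// inside the WARP_SIZE-wide tile slice before deferring to the *_impl_mmq helpers.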
vec_dot_q2_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const int kbx = k / QI2_K; const int ky = (k % QI2_K) * QR2_K; const float * y_df = (const float *) y_ds; int v[QR2_K*VDR_Q2_K_Q8_1_MMQ]; const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2)); #pragma unroll for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) { v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303; } const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4; const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE; return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K]; __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_qh = tile_x_qh; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI3_K; const int kqsx = k % QI3_K; const block_q3_K * bx0 = (const block_q3_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI3_K; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) { int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) { int i = i0 + i_offset * 2 + k / (WARP_SIZE/2); if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2); // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); if (need_check) { i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4); const int ksc = k % (QI3_K/4); const int ksc_low = ksc % (QI3_K/8); const int shift_low = 4 * (ksc / (QI3_K/8)); const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F; const int ksc_high = QI3_K/8; const int shift_high = 2 * ksc; 
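// Scale-unpacking note (added commentary): block_q3_K packs sixteen 6-bit sub-block
// scales into 12 bytes -- the low 4 bits of each scale live in the first 8 bytes and
// the high 2 bits in the last 4 bytes. sc_low above picks up four low nibbles at once;
// sc_high below shifts the matching 2-bit fields into bits 4..5 of each byte, and
// __vsubss4(sc_low | sc_high, 0x20202020) recenters the 6-bit values to signed scales
// in [-32, 31].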
const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030; const int sc = __vsubss4(sc_low | sc_high, 0x20202020); x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc; } } static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kbx = k / QI3_K; const int ky = (k % QI3_K) * QR3_K; const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4; int v[QR3_K*VDR_Q3_K_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) { const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); const int shift = 2 * ((ky % 32) / 8); const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303; const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); const int vlh = (vh << 2) & 0x04040404; v[l] = __vsubss4(vll, vlh); } const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE; return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI4_K; // == 0 if QK_K == 256 const int kqsx = k % QI4_K; // == k if QK_K == 256 const block_q4_K * bx0 = (const block_q4_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) { int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd; #if QK_K == 256 x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm; #else x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]}; #endif } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8); const int * scales = (const int *) bxi->scales; const int ksc = k % (WARP_SIZE/8); // scale 
arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; } } static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8); const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE; return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8, x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI5_K; // == 0 if QK_K == 256 const int kqsx = k % QI5_K; // == k if QK_K == 256 const block_q5_K * bx0 = (const block_q5_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx; const int ky = QR5_K*kqsx; const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4)); const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010; const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010; const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0; const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4); x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0; x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1; } const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) { int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd; #if QK_K == 256 x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm; #endif } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8); const int * scales = (const int *) bxi->scales; const int ksc = k % (WARP_SIZE/8); // scale 
arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; } } static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k; const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE; return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8, x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { GGML_UNUSED(x_qh); __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K]; __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; *x_sc = tile_x_sc; } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { GGML_UNUSED(x_qh); GGML_CUDA_ASSUME(i_offset >= 0); GGML_CUDA_ASSUME(i_offset < nwarps); GGML_CUDA_ASSUME(k >= 0); GGML_CUDA_ASSUME(k < WARP_SIZE); const int kbx = k / QI6_K; // == 0 if QK_K == 256 const int kqsx = k % QI6_K; // == k if QK_K == 256 const block_q6_K * bx0 = (const block_q6_K *) vx; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx; const int ky = QR6_K*kqsx; const int ql = get_int_from_uint8(bxi->ql, kqsx); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4)); const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030; const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030; const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0; const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2); x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); } const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) { int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; if (need_check) { i = min(i, i_max); } const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % 
(WARP_SIZE/8)) / 4; x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); } } static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]); const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k; const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE; return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); const float * x_dmf = (const float *) x_dm; int u[2*VDR_Q4_0_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE]; } return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ> (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { GGML_UNUSED(x_qh); GGML_UNUSED(x_sc); const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); int u[2*VDR_Q4_1_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE]; } return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ> (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } extern "C" __global__ void mul_mat_q4_0( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_0_AMPERE; const int mmq_y = MMQ_Y_Q4_0_AMPERE; const int nwarps = NWARPS_Q4_0_AMPERE; mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>, load_tiles_q4_0<mmq_y, nwarps, true>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q4_1( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_1_AMPERE; const int mmq_y = MMQ_Y_Q4_1_AMPERE; const int nwarps = NWARPS_Q4_1_AMPERE; mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>, load_tiles_q4_1<mmq_y, nwarps, 
true>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q5_0( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q5_0_AMPERE; const int mmq_y = MMQ_Y_Q5_0_AMPERE; const int nwarps = NWARPS_Q5_0_AMPERE; mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>, load_tiles_q5_0<mmq_y, nwarps, true>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q5_1( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q5_1_AMPERE; const int mmq_y = MMQ_Y_Q5_1_AMPERE; const int nwarps = NWARPS_Q5_1_AMPERE; mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>, load_tiles_q5_1<mmq_y, nwarps, true>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q8_0( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q8_0_AMPERE; const int mmq_y = MMQ_Y_Q8_0_AMPERE; const int nwarps = NWARPS_Q8_0_AMPERE; mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>, load_tiles_q8_0<mmq_y, nwarps, true>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q2_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q2_K_AMPERE; const int mmq_y = MMQ_Y_Q2_K_AMPERE; const int nwarps = NWARPS_Q2_K_AMPERE; mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>, load_tiles_q2_K<mmq_y, nwarps, true>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q3_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q3_K_AMPERE; const int mmq_y = MMQ_Y_Q3_K_AMPERE; const int nwarps = NWARPS_Q3_K_AMPERE; mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>, load_tiles_q3_K<mmq_y, nwarps, true>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q4_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_K_AMPERE; const int mmq_y = MMQ_Y_Q4_K_AMPERE; const int nwarps = NWARPS_Q4_K_AMPERE; mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>, load_tiles_q4_K<mmq_y, nwarps, true>, VDR_Q4_K_Q8_1_MMQ, 
vec_dot_q4_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q5_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q5_K_AMPERE; const int mmq_y = MMQ_Y_Q5_K_AMPERE; const int nwarps = NWARPS_Q5_K_AMPERE; mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>, load_tiles_q5_K<mmq_y, nwarps, true>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q6_K( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q6_K_AMPERE; const int mmq_y = MMQ_Y_Q6_K_AMPERE; const int nwarps = NWARPS_Q6_K_AMPERE; mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>, load_tiles_q6_K<mmq_y, nwarps, true>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); }
candle/candle-kernels/src/quantized.cu/0
{ "file_path": "candle/candle-kernels/src/quantized.cu", "repo_id": "candle", "token_count": 85791 }
use crate::utils::EncoderProvider; use crate::{ConstantValues, Kernels, MetalKernelError, Source, Value}; use metal::{Buffer, ComputeCommandEncoderRef, Device, MTLSize, NSUInteger}; use std::ffi::c_void; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum GemmDType { BF16, F16, F32, } #[allow(clippy::too_many_arguments)] pub fn call_mlx_gemm( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, dtype: GemmDType, (b, m, n, k): (usize, usize, usize, usize), lhs_stride: &[usize], lhs_offset: usize, lhs_buffer: &Buffer, rhs_stride: &[usize], rhs_offset: usize, rhs_buffer: &Buffer, output: &Buffer, ) -> Result<(), MetalKernelError> { #[derive(Debug)] #[repr(C)] struct GemmParams { m: i32, n: i32, k: i32, lda: i32, ldb: i32, ldd: i32, tiles_n: i32, tiles_m: i32, batch_stride_a: isize, batch_stride_b: isize, batch_stride_d: isize, swizzle_log: i32, gemm_k_iterations_aligned: i32, batch_ndim: i32, } assert!(rhs_stride.len() >= 2); assert!(lhs_stride.len() >= 2); let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; // lhs has shape b, m, k // We also allow for the case where the stride on the minor dimension is not as expected but // there is a single element. let (lda, a_trans) = if (lhs_m1 == 1 || k == 1) && (lhs_m2 == k || m == 1) { (k as i32, false) } else if (lhs_m1 == m || k == 1) && (lhs_m2 == 1 || m == 1) { (m as i32, true) } else { return Err(MetalKernelError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })?; }; // rhs has shape b, k, n let (ldb, b_trans) = if (rhs_m1 == 1 || n == 1) && (rhs_m2 == n || k == 1) { (n as i32, false) } else if (rhs_m1 == k || n == 1) && (rhs_m2 == 1 || k == 1) { (k as i32, true) } else { return Err(MetalKernelError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })?; }; let (bm, bn, bk, wn, wm) = (32, 32, 16, 2, 2); // https://github.com/ml-explore/mlx/blob/02efb310cac667bc547d1b96f21596c221f84fe7/mlx/backend/metal/matmul.cpp#L422 let constants = Some(ConstantValues::new(vec![ (10, Value::Bool(/* has_batch */ b > 1)), (100, Value::Bool(/* use_out_source */ false)), (110, Value::Bool(/* do_axpby */ false)), (200, Value::Bool(/* align_m */ m % bm == 0)), (201, Value::Bool(/* align_n */ n % bn == 0)), (202, Value::Bool(/* align_k */ k % bk == 0)), (300, Value::Bool(/* do_gather */ false)), ])); let swizzle_log = 0; let tile = 1 << swizzle_log; let tn = n.div_ceil(bn); let tm = m.div_ceil(bm); let tn = tn * tile; let tm = tm.div_ceil(tile); let batch_stride_a = if lhs_stride.len() > 2 { lhs_stride[lhs_stride.len() - 3] } else { m * k }; let batch_stride_b = if rhs_stride.len() > 2 { rhs_stride[rhs_stride.len() - 3] } else { n * k }; let gemm_params = GemmParams { m: m as i32, n: n as i32, k: k as i32, lda, ldb, ldd: n as i32, tiles_n: tn as i32, tiles_m: tm as i32, swizzle_log, batch_stride_a: batch_stride_a as isize, batch_stride_b: batch_stride_b as isize, batch_stride_d: (m * n) as isize, batch_ndim: 1i32, gemm_k_iterations_aligned: (k / bk) as i32, }; let batch_strides = [gemm_params.batch_stride_a, gemm_params.batch_stride_b]; // TODO(laurent): generate the name // template [[host_name("gemm_" #tname "_" #iname "_" #oname "_bm" #bm "_bn" #bn "_bk" #bk "_wm" #wm "_wn" #wn)]] let name = match (dtype, a_trans, b_trans) { (GemmDType::F32, false, false) => "gemm_nn_f32_f32_32_32_16_2_2", 
(GemmDType::F32, true, false) => "gemm_tn_f32_f32_32_32_16_2_2", (GemmDType::F32, false, true) => "gemm_nt_f32_f32_32_32_16_2_2", (GemmDType::F32, true, true) => "gemm_tt_f32_f32_32_32_16_2_2", (GemmDType::BF16, false, false) => "gemm_nn_bf16_bf16_32_32_16_2_2", (GemmDType::BF16, true, false) => "gemm_tn_bf16_bf16_32_32_16_2_2", (GemmDType::BF16, false, true) => "gemm_nt_bf16_bf16_32_32_16_2_2", (GemmDType::BF16, true, true) => "gemm_tt_bf16_bf16_32_32_16_2_2", (GemmDType::F16, false, false) => "gemm_nn_f16_f16_32_32_16_2_2", (GemmDType::F16, true, false) => "gemm_tn_f16_f16_32_32_16_2_2", (GemmDType::F16, false, true) => "gemm_nt_f16_f16_32_32_16_2_2", (GemmDType::F16, true, true) => "gemm_tt_f16_f16_32_32_16_2_2", }; let pipeline = kernels.load_pipeline_with_constants(device, Source::Gemm, name, constants)?; let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); encoder.set_buffer(0, Some(lhs_buffer), lhs_offset as NSUInteger); encoder.set_buffer(1, Some(rhs_buffer), rhs_offset as NSUInteger); encoder.set_buffer(3, Some(output), 0); encoder.set_bytes( 4, std::mem::size_of::<GemmParams>() as u64, &gemm_params as *const GemmParams as *const c_void, ); encoder.set_bytes( 6, // batch_shape std::mem::size_of::<i32>() as u64, &(b as i32) as *const i32 as *const c_void, ); encoder.set_bytes( 7, (std::mem::size_of::<isize>() * batch_strides.len()) as u64, batch_strides.as_ptr() as *const c_void, ); let grid_size = MTLSize { width: tn as u64, height: tm as u64, depth: /* batch_size_out */ b as u64, }; let group_size = MTLSize { width: 32, height: wn, depth: wm, }; encoder.use_resource(lhs_buffer, metal::MTLResourceUsage::Read); encoder.use_resource(rhs_buffer, metal::MTLResourceUsage::Read); encoder.use_resource(output, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(grid_size, group_size); Ok(()) }
candle/candle-metal-kernels/src/mlx_gemm.rs/0
{ "file_path": "candle/candle-metal-kernels/src/mlx_gemm.rs", "repo_id": "candle", "token_count": 3374 }
use candle_metal_kernels::{call_unary_contiguous, call_unary_strided, unary, Kernels}; use half::{bf16, f16}; use metal::objc::rc::autoreleasepool; use metal::{Device, MTLResourceOptions}; use rand; use std::any::type_name; use std::time::Instant; fn main() { let device = Device::system_default().unwrap(); let kernels = Kernels::new(); let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>(); let f32_10k = (0..10000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let f32_100k = (0..100000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let f16_map = |v: &[f32]| v.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>(); let f16_1k = f16_map(&f32_1k); let f16_10k = f16_map(&f32_10k); let f16_100k = f16_map(&f32_100k); let bf16_map = |v: &[f32]| v.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>(); let bf16_1k = bf16_map(&f32_1k); let bf16_10k = bf16_map(&f32_10k); let bf16_100k = bf16_map(&f32_100k); let f32_ckernels = [ unary::contiguous::sin::FLOAT, unary::contiguous::cos::FLOAT, unary::contiguous::exp::FLOAT, unary::contiguous::sqr::FLOAT, unary::contiguous::sqrt::FLOAT, unary::contiguous::neg::FLOAT, unary::contiguous::copy::FLOAT, ]; let f32_skernels = [ unary::strided::sin::FLOAT, unary::strided::cos::FLOAT, unary::strided::exp::FLOAT, unary::strided::sqr::FLOAT, unary::strided::sqrt::FLOAT, unary::strided::neg::FLOAT, unary::strided::copy::FLOAT, ]; let f16_ckernels = [ unary::contiguous::sin::HALF, unary::contiguous::cos::HALF, unary::contiguous::exp::HALF, unary::contiguous::sqr::HALF, unary::contiguous::sqrt::HALF, unary::contiguous::neg::HALF, unary::contiguous::copy::HALF, ]; let f16_skernels = [ unary::strided::sin::HALF, unary::strided::cos::HALF, unary::strided::exp::HALF, unary::strided::sqr::HALF, unary::strided::sqrt::HALF, unary::strided::neg::HALF, unary::strided::copy::HALF, ]; let bf16_ckernels = [ unary::contiguous::sin::BFLOAT, unary::contiguous::cos::BFLOAT, unary::contiguous::exp::BFLOAT, unary::contiguous::sqr::BFLOAT, unary::contiguous::sqrt::BFLOAT, unary::contiguous::neg::BFLOAT, unary::contiguous::copy::BFLOAT, ]; let bf16_skernels = [ unary::strided::sin::BFLOAT, unary::strided::cos::BFLOAT, unary::strided::exp::BFLOAT, unary::strided::sqr::BFLOAT, unary::strided::sqrt::BFLOAT, unary::strided::neg::BFLOAT, unary::strided::copy::BFLOAT, ]; println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}", "dtype", "kernel", "size", "runs", "total time", "avg time" ); // f32 run_unary_bench(&device, &kernels, &f32_1k, f32_ckernels, f32_skernels); run_unary_bench(&device, &kernels, &f32_10k, f32_ckernels, f32_skernels); run_unary_bench(&device, &kernels, &f32_100k, f32_ckernels, f32_skernels); // f16 run_unary_bench(&device, &kernels, &f16_1k, f16_ckernels, f16_skernels); run_unary_bench(&device, &kernels, &f16_10k, f16_ckernels, f16_skernels); run_unary_bench(&device, &kernels, &f16_100k, f16_ckernels, f16_skernels); // bf16 run_unary_bench(&device, &kernels, &bf16_1k, bf16_ckernels, bf16_skernels); run_unary_bench(&device, &kernels, &bf16_10k, bf16_ckernels, bf16_skernels); run_unary_bench(&device, &kernels, &bf16_100k, bf16_ckernels, bf16_skernels); } fn run_unary_bench<T: Clone>( device: &Device, kernels: &Kernels, v: &[T], contiguous: [unary::contiguous::Kernel; 7], strided: [unary::strided::Kernel; 7], ) { let command_queue = device.new_command_queue(); let options = MTLResourceOptions::StorageModeManaged; let iterations = 10000; let input = device.new_buffer_with_data( v.as_ptr() as *const core::ffi::c_void, 
core::mem::size_of_val(v) as u64, options, ); let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options); // Contiguous for kernel_name in contiguous { let total_time = autoreleasepool(|| { let command_buffer = command_queue.new_command_buffer(); let start = Instant::now(); for _ in 0..iterations { call_unary_contiguous( device, &command_buffer, kernels, kernel_name, v.len(), &input, &mut output, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); start.elapsed() }); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}", type_name::<T>().split("::").last().unwrap(), kernel_name.0, v.len(), iterations, total_time, total_time / iterations ); } // Strided let shape = vec![2, 5_000]; let strides = vec![2, 1]; let offset = 0; for kernel_name in &strided { let total_time = autoreleasepool(|| { let command_buffer = command_queue.new_command_buffer(); let start = Instant::now(); for _ in 0..iterations { call_unary_strided( device, command_buffer, &kernels, kernel_name, &shape, &input, &strides, offset, &mut output, 0, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); start.elapsed() }); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}", type_name::<T>().split("::").last().unwrap(), kernel_name.0, v.len(), iterations, total_time, total_time / iterations ); } }
candle/candle-metal-kernels/tmp/unary.rs/0
{ "file_path": "candle/candle-metal-kernels/tmp/unary.rs", "repo_id": "candle", "token_count": 3489 }
//! Group Normalization. //! //! This layer applies Group Normalization over a mini-batch of inputs. use candle::{DType, Result, Tensor}; // This group norm version handles both weight and bias so removes the mean. #[derive(Clone, Debug)] pub struct GroupNorm { weight: Tensor, bias: Tensor, eps: f64, num_channels: usize, num_groups: usize, } impl GroupNorm { pub fn new( weight: Tensor, bias: Tensor, num_channels: usize, num_groups: usize, eps: f64, ) -> Result<Self> { if num_channels % num_groups != 0 { candle::bail!( "GroupNorm: num_groups ({num_groups}) must divide num_channels ({num_channels})" ) } Ok(Self { weight, bias, eps, num_channels, num_groups, }) } } impl crate::Module for GroupNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let x_shape = x.dims(); if x_shape.len() <= 2 { candle::bail!("input rank for GroupNorm should be at least 3"); } let (b_sz, n_channels) = (x_shape[0], x_shape[1]); let hidden_size = x_shape[2..].iter().product::<usize>() * n_channels / self.num_groups; if n_channels != self.num_channels { candle::bail!( "unexpected num-channels in GroupNorm ({n_channels} <> {}", self.num_channels ) } let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let x = x.reshape((b_sz, self.num_groups, hidden_size))?; let x = x.to_dtype(internal_dtype)?; let mean_x = (x.sum_keepdim(2)? / hidden_size as f64)?; let x = x.broadcast_sub(&mean_x)?; let norm_x = (x.sqr()?.sum_keepdim(2)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?; let mut w_dims = vec![1; x_shape.len()]; w_dims[1] = n_channels; let weight = self.weight.reshape(w_dims.clone())?; let bias = self.bias.reshape(w_dims)?; x_normed .to_dtype(x_dtype)? .reshape(x_shape)? .broadcast_mul(&weight)? .broadcast_add(&bias) } } pub fn group_norm( num_groups: usize, num_channels: usize, eps: f64, vb: crate::VarBuilder, ) -> Result<GroupNorm> { let weight = vb.get_with_hints(num_channels, "weight", crate::Init::Const(1.))?; let bias = vb.get_with_hints(num_channels, "bias", crate::Init::Const(0.))?; GroupNorm::new(weight, bias, num_channels, num_groups, eps) }
candle/candle-nn/src/group_norm.rs/0
{ "file_path": "candle/candle-nn/src/group_norm.rs", "repo_id": "candle", "token_count": 1372 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{Device, Result, Tensor}; #[test] fn kv_cache() -> Result<()> { let mut cache = candle_nn::kv_cache::Cache::new(0, 16); for _ in [0, 1] { assert_eq!(cache.current_seq_len(), 0); let data = cache.current_data()?; assert!(data.is_none()); let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?; cache.append(&t)?; let data = cache.current_data()?.unwrap(); assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3.]); let t = Tensor::new(&[4f32], &Device::Cpu)?; cache.append(&t)?; let data = cache.current_data()?.unwrap(); assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3., 4.]); let t = Tensor::new(&[0f32, 5., 6., 7.], &Device::Cpu)?; cache.append(&t)?; let data = cache.current_data()?.unwrap(); assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3., 4., 0., 5., 6., 7.]); assert_eq!(cache.current_seq_len(), 8); cache.reset(); } Ok(()) } #[test] fn rotating_kv_cache() -> Result<()> { let mut cache = candle_nn::kv_cache::RotatingCache::new(0, 6); for _ in [0, 1] { assert_eq!(cache.offset(), 0); assert_eq!(cache.current_seq_len(), 0); let data = cache.current_data()?; assert!(data.is_none()); let t = Tensor::new(&[1., 2., 3.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [1., 2., 3.]); let t = Tensor::new(&[4.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [1., 2., 3., 4.]); let t = Tensor::new(&[0., 5., 6., 7.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [6., 7., 3., 4., 0., 5.]); assert_eq!(cache.current_seq_len(), 8); assert_eq!(cache.offset(), 2); let t = Tensor::new(&[8.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [6., 7., 8., 4., 0., 5.]); assert_eq!(cache.current_seq_len(), 9); assert_eq!(cache.offset(), 3); let t = Tensor::new(&[9., 10., 11.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [6., 7., 8., 9., 10., 11.]); assert_eq!(cache.current_seq_len(), 12); assert_eq!(cache.offset(), 0); let t = Tensor::new(&[12.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [12., 7., 8., 9., 10., 11.]); assert_eq!(cache.current_seq_len(), 13); assert_eq!(cache.offset(), 1); let mask = cache.attn_mask(2, &Device::Cpu)?.unwrap(); assert_eq!( mask.to_vec2::<u8>()?, &[[0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]] ); let mask = cache.attn_mask(3, &Device::Cpu)?.unwrap(); assert_eq!( mask.to_vec2::<u8>()?, &[[0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0]], ); let t = Tensor::new(&[0., 1., 2., 3., 4., 5., 6., 7., 8.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [0., 1., 2., 3., 4., 5., 6., 7., 8.]); assert_eq!(cache.current_seq_len(), 22); assert_eq!(cache.offset(), 0); let mask = cache.attn_mask(1, &Device::Cpu)?; assert!(mask.is_none()); let mask = cache.attn_mask(2, &Device::Cpu)?.unwrap(); assert_eq!( mask.to_vec2::<u8>()?, &[[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] ); let mask = cache.attn_mask(3, &Device::Cpu)?.unwrap(); assert_eq!( mask.to_vec2::<u8>()?, &[[0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]] ); let t = Tensor::new(&[42.], &Device::Cpu)?; let data = cache.append(&t)?; assert_eq!(data.to_vec1::<f64>()?, [42., 4., 5., 6., 7., 8.]); assert_eq!(cache.current_seq_len(), 23); assert_eq!(cache.offset(), 1); cache.reset(); } Ok(()) }
candle/candle-nn/tests/kv_cache.rs/0
{ "file_path": "candle/candle-nn/tests/kv_cache.rs", "repo_id": "candle", "token_count": 2202 }
[package] name = "candle-pyo3" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [lib] name = "candle" crate-type = ["cdylib"] [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } candle-nn = { workspace = true } candle-onnx = { workspace = true, optional = true } half = { workspace = true } intel-mkl-src = { workspace = true, optional = true } pyo3 = { version = "0.22.0", features = ["extension-module", "abi3-py311"] } [build-dependencies] pyo3-build-config = "0.22" [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate"] cuda = ["candle/cuda"] mkl = ["dep:intel-mkl-src","candle/mkl"] onnx = ["dep:candle-onnx"]
candle/candle-pyo3/Cargo.toml/0
{ "file_path": "candle/candle-pyo3/Cargo.toml", "repo_id": "candle", "token_count": 315 }
from candle import Tensor, QTensor, DType from typing import ( Dict, Tuple, Any, Optional, Union, Iterator, Set, overload, Mapping, TypeVar, List, ) from collections import OrderedDict, namedtuple TensorLike = Union[Tensor, QTensor] T = TypeVar("T", bound="Module") class _IncompatibleKeys(namedtuple("IncompatibleKeys", ["missing_keys", "unexpected_keys"])): def __repr__(self): if not self.missing_keys and not self.unexpected_keys: return "<All keys matched successfully>" return super().__repr__() __str__ = __repr__ # see: https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/module.py class Module: """ Pytorch like Module. Base class for all neural network modules. Your models should also subclass this class. """ _modules: Dict[str, Optional["Module"]] _buffers: Dict[str, Optional[TensorLike]] _non_persistent_buffers_set: Set[str] _quantizable_buffers: Set[str] _version: int = 1 def __init__(self, *args, **kwargs) -> None: """ Initializes internal Module state """ super().__setattr__("_modules", OrderedDict()) super().__setattr__("_buffers", OrderedDict()) super().__setattr__("_non_persistent_buffers_set", set()) super().__setattr__("_quantizable_buffers", set()) def __call__(self, *input): """ Call self as a function. """ return self.forward(*input) def forward(self, *input): """ Defines the computation performed at every call. Should be overridden by all subclasses. """ pass def children(self) -> Iterator["Module"]: r"""Returns an iterator over immediate children modules. Yields: Module: a child module """ for name, module in self.named_children(): yield module def named_children(self) -> Iterator[Tuple[str, "Module"]]: r"""Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself. Yields: (str, Module): Tuple containing a name and child module Example:: >>> for name, module in model.named_children(): >>> if name in ['conv4', 'conv5']: >>> print(module) """ memo = set() for name, module in self._modules.items(): if module is not None and module not in memo: memo.add(module) yield name, module def add_module(self, name: str, module: Optional["Module"]) -> None: r"""Adds a child module to the current module. The module can be accessed as an attribute using the given name. Args: name (str): name of the child module. The child module can be accessed from this module using the given name module (Module): child module to be added to the module. """ if not isinstance(module, Module) and module is not None: raise TypeError(f"{str(module)} is not a Module subclass") elif not isinstance(name, str): raise TypeError(f"module name should be a string. Got {name}") elif hasattr(self, name) and name not in self._modules: raise KeyError(f"attribute '{name}' already exists") elif "." in name: raise KeyError(f'module name can\'t contain ".", got: {name}') elif name == "": raise KeyError('module name can\'t be empty string ""') self._modules[name] = module def register_module(self, name: str, module: Optional["Module"]) -> None: r"""Alias for :func:`add_module`.""" self.add_module(name, module) def modules(self) -> Iterator["Module"]: r"""Returns an iterator over all modules in the network.""" for _, module in self.named_modules(): yield module def named_modules( self, memo: Optional[Set["Module"]] = None, prefix: str = "", remove_duplicate: bool = True, ): r"""Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself. 
Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not Yields: (str, Module): Tuple of name and module Note: Duplicate modules are returned only once. In the following example, ``l`` will be returned only once. """ if memo is None: memo = set() if self not in memo: if remove_duplicate: memo.add(self) yield prefix, self for name, module in self._modules.items(): if module is None: continue submodule_prefix = prefix + ("." if prefix else "") + name for m in module.named_modules(memo, submodule_prefix, remove_duplicate): yield m def buffers(self, recurse: bool = True) -> Iterator[TensorLike]: """ Returns an iterator over module buffers. """ for name, buf in self.named_buffers(recurse=recurse): yield buf def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[Tuple[str, TensorLike]]: r"""Returns an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself. Args: prefix (str): prefix to prepend to all buffer names. recurse (bool, optional): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True. remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True. Yields: (str, Tensor): Tuple containing the name and buffer Example:: >>> for name, buf in self.named_buffers(): >>> if name in ['running_var']: >>> print(buf.size()) """ gen = self._named_members( lambda module: module._buffers.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate, ) yield from gen # The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns # back that same object. But if they pass nothing, an `OrderedDict` is created and returned. T_destination = TypeVar("T_destination", bound=Dict[str, Any]) @overload def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination: ... @overload def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]: ... def state_dict(self, *args, destination=None, prefix="", keep_vars=False): r"""Returns a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to ``None`` are not included. .. note:: The returned object is a shallow copy. It contains references to the module's parameters and buffers. .. warning:: Currently ``state_dict()`` also accepts positional arguments for ``destination``, ``prefix`` and ``keep_vars`` in order. However, this is being deprecated and keyword arguments will be enforced in future releases. .. warning:: Please avoid the use of argument ``destination`` as it is not designed for end-users. Args: destination (dict, optional): If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned. Default: ``None``. prefix (str, optional): a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ``''``. keep_vars (bool, optional): by default the :class:`~candle.Tensor` s returned in the state dict are detached from autograd. 
If it's set to ``True``, detaching will not be performed. Default: ``False``. Returns: dict: a dictionary containing a whole state of the module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> module.state_dict().keys() ['bias', 'weight'] """ # TODO: Remove `args` and the parsing logic when BC allows. if len(args) > 0: if destination is None: destination = args[0] if len(args) > 1 and prefix == "": prefix = args[1] if len(args) > 2 and keep_vars is False: keep_vars = args[2] if destination is None: destination = OrderedDict() destination._metadata = OrderedDict() local_metadata = dict(version=self._version) if hasattr(destination, "_metadata"): destination._metadata[prefix[:-1]] = local_metadata self._save_to_state_dict(destination, prefix, keep_vars) for name, module in self._modules.items(): if module is not None: module.state_dict( destination=destination, prefix=prefix + name + ".", keep_vars=keep_vars, ) return destination def _save_to_state_dict(self, destination, prefix, keep_vars): r"""Saves module state to `destination` dictionary, containing a state of the module, but not its descendants. This is called on every submodule in :meth:`~candle.nn.Module.state_dict`. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module """ for name, buf in self._buffers.items(): if buf is not None and name not in self._non_persistent_buffers_set: if isinstance(buf, Tensor): destination[prefix + name] = buf if keep_vars else buf.detach() else: destination[prefix + name] = buf def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False): r"""Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~candle.nn.Module.state_dict` function. .. warning:: If :attr:`assign` is ``True`` the optimizer must be created after the call to :attr:`load_state_dict`. Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~candle.nn.Module.state_dict` function. Default: ``True`` assign (bool, optional): whether to assign items in the state dictionary to their corresponding keys in the module instead of copying them inplace into the module's current parameters and buffers. When ``False``, the properties of the tensors in the current module are preserved while when ``True``, the properties of the Tensors in the state dict are preserved. Default: ``False`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys Note: If a parameter or buffer is registered as ``None`` and its corresponding key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a ``RuntimeError``. 
""" if not isinstance(state_dict, Mapping): raise TypeError(f"Expected state_dict to be dict-like, got {type(state_dict)}.") missing_keys: List[str] = [] unexpected_keys: List[str] = [] error_msgs: List[str] = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = OrderedDict(state_dict) if metadata is not None: # mypy isn't aware that "_metadata" exists in state_dict state_dict._metadata = metadata # type: ignore[attr-defined] def load(module, local_state_dict, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) if assign: local_metadata["assign_to_params_buffers"] = assign module._load_from_state_dict( local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs, ) for name, child in module._modules.items(): if child is not None: child_prefix = prefix + name + "." child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)} load(child, child_state_dict, child_prefix) load(self, state_dict) del load if strict: if len(unexpected_keys) > 0: error_msgs.insert( 0, "Unexpected key(s) in state_dict: {}. ".format(", ".join(f'"{k}"' for k in unexpected_keys)), ) if len(missing_keys) > 0: error_msgs.insert( 0, "Missing key(s) in state_dict: {}. ".format(", ".join(f'"{k}"' for k in missing_keys)), ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(self.__class__.__name__, "\n\t".join(error_msgs)) ) return _IncompatibleKeys(missing_keys, unexpected_keys) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): r"""Copies parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~candle.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. Additionally, :attr:`local_metadata` can also contain the key `assign_to_params_buffers` that indicates whether keys should be assigned their corresponding tensor in the state_dict. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~candle.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. 
See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~candle.nn.Module.load_state_dict` """ persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_name_params = persistent_buffers.items() local_state = {k: v for k, v in local_name_params if v is not None} for name, param in local_state.items(): key = prefix + name if key in state_dict: input_param = state_dict[key] if not isinstance(input_param, (Tensor, QTensor)): error_msgs.append( f'While copying the parameter named "{key}", ' "expected Tensor-like object from checkpoint but " f"received {type(input_param)}" ) continue if input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append( "size mismatch for {}: copying a param with shape {} from checkpoint, " "the shape in current model is {}.".format(key, input_param.shape, param.shape) ) continue try: # Shape checks are already done above -> Just assign tensor setattr(self, name, input_param) except Exception as ex: error_msgs.append( f'While copying the parameter named "{key}", ' f"whose dimensions in the model are {param.shape} and " f"whose dimensions in the checkpoint are {input_param.shape}, " f"an exception occurred : {ex.args}." ) elif strict: missing_keys.append(key) if strict: for key in state_dict.keys(): if key.startswith(prefix): input_name = key[len(prefix) :] input_name = input_name.split(".", 1)[0] # get the name of param/buffer/child if input_name not in self._modules and input_name not in local_state: unexpected_keys.append(key) def _named_members(self, get_members_fn, prefix="", recurse=True, remove_duplicate: bool = True): r"""Helper method for yielding various names + members of modules.""" memo = set() modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)] for module_prefix, module in modules: members = get_members_fn(module) for k, v in members: if v is None or v in memo: continue if remove_duplicate: memo.add(v) name = module_prefix + ("." if module_prefix else "") + k yield name, v def _get_name(self): return self.__class__.__name__ def _apply(self, fn): for module in self.children(): module._apply(fn) for key, buf in self._buffers.items(): if buf is not None: self._buffers[key] = fn(buf) return self def __move_tensor_to_device(self, tensor: TensorLike, device: str): if isinstance(tensor, Tensor): return tensor.to_device(device) else: raise NotImplementedError("Cannot offload QTensor to cuda, yet!") def device(self) -> str: """ Gets the device of the module, by inspecting its tensors. """ tensor = next(self.buffers()) if isinstance(tensor, Tensor): return tensor.device else: # QTensors can only be on the CPU return "cpu" def cuda(self: T) -> T: r"""Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. .. note:: This method modifies the module in-place. 
Returns: Module: self """ def to_cuda(t: TensorLike): return self.__move_tensor_to_device(t, "cuda") return self._apply(to_cuda) def cpu(self: T) -> T: r"""Moves all model parameters and buffers to the CPU. .. note:: This method modifies the module in-place. Returns: Module: self """ def to_cpu(t: TensorLike): return self.__move_tensor_to_device(t, "cpu") return self._apply(to_cpu) def __cast_tensor(self, tensor: TensorLike, dtype: Union[DType, str]): if isinstance(tensor, Tensor): return tensor.to_dtype(dtype) else: raise TypeError("candle.Module.to only accepts Tensor dtypes, but got desired dtype={}".format(dtype)) def type(self: T, dst_type: Union[DType, str]) -> T: r"""Casts all parameters and buffers to :attr:`dst_type`. .. note:: This method modifies the module in-place. Args: dst_type (type or string): the desired type Returns: Module: self """ def cast(t: TensorLike): return self.__cast_tensor(t, dst_type) return self._apply(cast) @overload def to( self: T, device: str = ..., dtype: Optional[Union[DType, str]] = ..., ) -> T: ... @overload def to(self: T, dtype: Union[DType, str]) -> T: ... def to(self, *args, **kwargs): r"""Moves and/or casts the parameters and buffers. This can be called as .. function:: to(device=None, dtype=None) :noindex: .. function:: to(dtype) :noindex: See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`candle.device`): the desired device of the parameters and buffers in this module dtype (:class:`candle.dtype`): the desired floating point dtype of the parameters and buffers in this module Returns: Module: self """ device = None dtype = None if args: for arg in args: # Assuming arg can be a string representing a device or a dtype if isinstance(arg, str): lower_arg = str(arg).lower() if lower_arg.startswith("cuda") or lower_arg == "cpu": device = lower_arg else: dtype = arg elif isinstance(arg, DType): dtype = str(arg) else: raise TypeError("Module.to() received an invalid combination of arguments. 
Got: {}".format(args)) if kwargs: device = kwargs.get("device", device) dtype = str(kwargs.get("dtype", dtype)) if device: device = device.lower() if dtype: dtype = dtype.lower() if dtype not in ["f32", "f16", "f64"]: raise TypeError( "candle.Module.to only accepts floating point" "dtypes, but got desired dtype={}".format(dtype) ) def convert(t): if dtype: t = self.__cast_tensor(t, dtype) if device: t = self.__move_tensor_to_device(t, device) return t return self._apply(convert) def __setattr__(self, __name: str, __value: Any) -> None: if isinstance(__value, Module): self._modules[__name] = __value elif isinstance(__value, QTensor): if __name in self._quantizable_buffers: type = __value.ggml_dtype.lower() if type in ["f32", "f16"]: # It is faster to just dequantize the tensor here and use the normal tensor operations dequant = __value.dequantize() if type == "f16": dequant = dequant.to_dtype("f16") self._buffers[__name] = dequant else: self._buffers[__name] = __value else: # We expect a normal tensor here => dequantize it self._buffers[__name] = __value.dequantize() elif isinstance(__value, Tensor): self._buffers[__name] = __value else: super().__setattr__(__name, __value) def __getattr__(self, __name: str) -> Any: if "_modules" in self.__dict__: modules = self.__dict__["_modules"] if __name in modules: return modules[__name] if "_buffers" in self.__dict__: tensors = self.__dict__["_buffers"] if __name in tensors: return tensors[__name] return super().__getattribute__(__name) def __delattr__(self, name): if name in self._buffers: del self._buffers[name] elif name in self._modules: del self._modules[name] else: super().__delattr__(name)
candle/candle-pyo3/py_src/candle/nn/module.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/module.py", "repo_id": "candle", "token_count": 12028 }
import candle print(f"mkl: {candle.utils.has_mkl()}") print(f"accelerate: {candle.utils.has_accelerate()}") print(f"num-threads: {candle.utils.get_num_threads()}") print(f"cuda: {candle.utils.cuda_is_available()}") t = candle.Tensor(42.0) print(t) print(t.shape, t.rank, t.device) print(t + t) t = candle.Tensor([3.0, 1, 4, 1, 5, 9, 2, 6]) print(t) print(t + t) t = t.reshape([2, 4]) print(t.matmul(t.t())) print(t.to_dtype(candle.u8)) print(t.to_dtype("u8")) t = candle.randn((5, 3)) print(t) print(t.dtype) t = candle.randn((16, 256)) quant_t = t.quantize("q6k") dequant_t = quant_t.dequantize() diff2 = (t - dequant_t).sqr() print(diff2.mean_all())
candle/candle-pyo3/test.py/0
{ "file_path": "candle/candle-pyo3/test.py", "repo_id": "candle", "token_count": 340 }
//! BigCode implementation in Rust based on the GPT-BigCode model. //! //! [StarCoder/BigCode](https://huggingface.co/bigcode/starcoderbase-1b) is a LLM //! model specialized to code generation. The initial model was trained on 80 //! programming languages. See "StarCoder: A State-of-the-Art LLM for Code", Mukherjee et al. 2023 //! - [Arxiv](https://arxiv.org/abs/2305.06161) //! - [Github](https://github.com/bigcode-project/starcoder) //! //! ## Running some example //! //! ```bash //! cargo run --example bigcode --release -- --prompt "fn fact(n: u64) -> u64" //! //! > fn fact(n: u64) -> u64 { //! > if n == 0 { //! > 1 //! > } else { //! > n * fact(n - 1) //! > } //! > } //! ``` //! use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, linear_b as linear, Embedding, LayerNorm, Linear, Module, VarBuilder}; fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<LayerNorm> { let weight = vb.get(size, "weight")?; let bias = vb.get(size, "bias")?; Ok(LayerNorm::new(weight, bias, eps)) } fn make_causal_mask(t: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j <= i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), device)?; Ok(mask) } #[derive(Debug)] pub struct Config { pub vocab_size: usize, // max_position_embeddings aka n_positions pub max_position_embeddings: usize, // num_hidden_layers aka n_layer pub num_hidden_layers: usize, // hidden_size aka n_embd pub hidden_size: usize, pub layer_norm_epsilon: f64, pub n_inner: Option<usize>, // num_attention_heads aka n_head pub num_attention_heads: usize, pub multi_query: bool, pub use_cache: bool, } impl Config { #[allow(dead_code)] pub fn starcoder_1b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 24, hidden_size: 2048, layer_norm_epsilon: 1e-5, n_inner: Some(8192), num_attention_heads: 16, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder_3b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 36, hidden_size: 2816, layer_norm_epsilon: 1e-5, n_inner: Some(11264), num_attention_heads: 22, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder_7b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 42, hidden_size: 4096, layer_norm_epsilon: 1e-5, n_inner: Some(16384), num_attention_heads: 32, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 40, hidden_size: 6144, layer_norm_epsilon: 1e-5, n_inner: Some(24576), num_attention_heads: 48, multi_query: true, use_cache: true, } } } struct Attention { c_attn: Linear, c_proj: Linear, kv_cache: Option<Tensor>, use_cache: bool, embed_dim: usize, kv_dim: usize, num_heads: usize, head_dim: usize, multi_query: bool, } impl Attention { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let head_dim = hidden_size / cfg.num_attention_heads; let kv_heads = if cfg.multi_query { 1 } else { cfg.num_attention_heads }; let kv_dim = kv_heads * head_dim; let c_attn = linear(hidden_size, hidden_size + 2 * kv_dim, true, vb.pp("c_attn"))?; let c_proj = linear(hidden_size, hidden_size, true, vb.pp("c_proj"))?; Ok(Self { c_proj, c_attn, embed_dim: hidden_size, kv_cache: None, use_cache: cfg.use_cache, kv_dim, head_dim, num_heads: cfg.num_attention_heads, multi_query: cfg.multi_query, }) } fn attn( 
&self, query: &Tensor, key: &Tensor, value: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { if query.dtype() != DType::F32 { // If we start supporting f16 models, we may need the upcasting scaling bits. // https://github.com/huggingface/transformers/blob/a0042379269bea9182c1f87e6b2eee4ba4c8cce8/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py#L133 candle::bail!("upcasting is not supported {:?}", query.dtype()) } let scale_factor = 1f64 / (self.head_dim as f64).sqrt(); let initial_query_shape = query.shape(); let key_len = key.dim(D::Minus1)?; let (query, key, attn_shape, attn_view) = if self.multi_query { let (b_sz, query_len, _) = query.dims3()?; let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?; let attn_shape = (b_sz, query_len, self.num_heads, key_len); let attn_view = (b_sz, query_len * self.num_heads, key_len); (query, key.clone(), attn_shape, attn_view) } else { let (b_sz, _num_heads, query_len, _head_dim) = query.dims4()?; let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?; let key = key.reshape((b_sz * self.num_heads, self.head_dim, key_len))?; let attn_shape = (b_sz, self.num_heads, query_len, key_len); let attn_view = (b_sz * self.num_heads, query_len, key_len); (query, key, attn_shape, attn_view) }; let attn_weights = (query.matmul(&key.contiguous()?)? * scale_factor)?.reshape(attn_shape)?; let attention_mask = attention_mask.broadcast_as(attn_shape)?; let mask_value = Tensor::new(f32::NEG_INFINITY, query.device())?.broadcast_as(attn_shape)?; let attn_weights = attention_mask.where_cond(&attn_weights, &mask_value)?; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let value = value.contiguous()?; let attn_output = if self.multi_query { attn_weights .reshape(attn_view)? .matmul(&value)? .reshape(initial_query_shape)? } else { attn_weights.matmul(&value)? }; Ok(attn_output) } fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let qkv = self.c_attn.forward(hidden_states)?; let (query, key_value) = if self.multi_query { let query = qkv.i((.., .., ..self.embed_dim))?; let key_value = qkv.i((.., .., self.embed_dim..self.embed_dim + 2 * self.kv_dim))?; (query, key_value) } else { let mut dims = qkv.dims().to_vec(); dims.pop(); dims.push(self.embed_dim); dims.push(self.head_dim * 3); let qkv = qkv.reshape(dims)?.transpose(1, 2)?; let query = qkv.i((.., .., .., ..self.head_dim))?; let key_value = qkv.i((.., .., .., self.head_dim..3 * self.head_dim))?; (query, key_value) }; let mut key_value = key_value; if self.use_cache { if let Some(kv_cache) = &self.kv_cache { // TODO: we could trim the tensors to MAX_SEQ_LEN so that this would work for // arbitrarily large sizes. key_value = Tensor::cat(&[kv_cache, &key_value], D::Minus2)?.contiguous()?; } self.kv_cache = Some(key_value.clone()) } let key = key_value.narrow(D::Minus1, 0, self.head_dim)?; let value = key_value.narrow(D::Minus1, self.head_dim, self.head_dim)?; let attn_output = self.attn(&query, &key.t()?, &value, attention_mask)?; let attn_output = if self.multi_query { attn_output } else { attn_output .transpose(1, 2)? .reshape(hidden_states.shape())? 
}; let attn_output = self.c_proj.forward(&attn_output)?; Ok(attn_output) } } struct Mlp { c_fc: Linear, c_proj: Linear, } impl Mlp { fn load(inner_dim: usize, vb: VarBuilder, cfg: &Config) -> Result<Self> { let c_fc = linear(cfg.hidden_size, inner_dim, true, vb.pp("c_fc"))?; let c_proj = linear(inner_dim, cfg.hidden_size, true, vb.pp("c_proj"))?; Ok(Self { c_fc, c_proj }) } fn forward(&mut self, hidden_states: &Tensor) -> Result<Tensor> { let hidden_states = self.c_fc.forward(hidden_states)?.gelu()?; let hidden_states = self.c_proj.forward(&hidden_states)?; Ok(hidden_states) } } // TODO: Add cross-attention? struct Block { ln_1: LayerNorm, attn: Attention, ln_2: LayerNorm, mlp: Mlp, } impl Block { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let inner_dim = cfg.n_inner.unwrap_or(4 * hidden_size); let ln_1 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_1"))?; let attn = Attention::load(vb.pp("attn"), cfg)?; let ln_2 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_2"))?; let mlp = Mlp::load(inner_dim, vb.pp("mlp"), cfg)?; Ok(Self { ln_1, attn, ln_2, mlp, }) } fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let residual = hidden_states; let hidden_states = self.ln_1.forward(hidden_states)?; let attn_outputs = self.attn.forward(&hidden_states, attention_mask)?; let hidden_states = (&attn_outputs + residual)?; let residual = &hidden_states; let hidden_states = self.ln_2.forward(&hidden_states)?; let hidden_states = self.mlp.forward(&hidden_states)?; let hidden_states = (&hidden_states + residual)?; Ok(hidden_states) } } pub struct GPTBigCode { wte: Embedding, wpe: Embedding, blocks: Vec<Block>, ln_f: LayerNorm, lm_head: Linear, bias: Tensor, config: Config, } impl GPTBigCode { pub fn config(&self) -> &Config { &self.config } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let vb_t = vb.pp("transformer"); let wte = embedding(cfg.vocab_size, hidden_size, vb_t.pp("wte"))?; let wpe = embedding(cfg.max_position_embeddings, hidden_size, vb_t.pp("wpe"))?; let blocks = (0..cfg.num_hidden_layers) .map(|i| Block::load(vb_t.pp(format!("h.{i}")), &cfg)) .collect::<Result<Vec<_>>>()?; let ln_f = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb_t.pp("ln_f"))?; let lm_head = linear(hidden_size, cfg.vocab_size, false, vb_t.pp("wte"))?; let bias = make_causal_mask(cfg.max_position_embeddings, vb.device())?; Ok(Self { wte, wpe, blocks, lm_head, ln_f, bias, config: cfg, }) } pub fn forward(&mut self, input_ids: &Tensor, past_len: usize) -> Result<Tensor> { let dev = input_ids.device(); let (b_sz, seq_len) = input_ids.dims2()?; let key_len = past_len + seq_len; let attention_mask = self.bias.i((past_len..key_len, ..key_len))?.unsqueeze(0)?; // MQA models: (batch_size, query_length, n_heads, key_length) // MHA models: (batch_size, n_heads, query_length, key_length) let seq_len_dim = if self.config.multi_query { 2 } else { 1 }; let attention_mask = attention_mask.unsqueeze(seq_len_dim)?; let position_ids = Tensor::arange(past_len as u32, (past_len + seq_len) as u32, dev)?; let position_ids = position_ids.unsqueeze(0)?.broadcast_as((b_sz, seq_len))?; let input_embeds = self.wte.forward(input_ids)?; let position_embeds = self.wpe.forward(&position_ids)?; let mut hidden_states = (&input_embeds + &position_embeds)?; for block in self.blocks.iter_mut() { hidden_states = block.forward(&hidden_states, &attention_mask)?; } let hidden_states = 
self.ln_f.forward(&hidden_states)?; let hidden_states = hidden_states .reshape((b_sz, seq_len, self.config.hidden_size))? .narrow(1, seq_len - 1, 1)?; let logits = self.lm_head.forward(&hidden_states)?.squeeze(1)?; Ok(logits) } }
candle/candle-transformers/src/models/bigcode.rs/0
{ "file_path": "candle/candle-transformers/src/models/bigcode.rs", "repo_id": "candle", "token_count": 6580 }
//! Implementation of the Depth Anything model from FAIR. //! //! See: //! - ["Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data"](https://github.com/LiheYoung/Depth-Anything) //! use std::sync::Arc; use candle::D::Minus1; use candle::{Module, Result, Tensor}; use candle_nn::ops::Identity; use candle_nn::{ batch_norm, conv2d, conv2d_no_bias, conv_transpose2d, linear, seq, Activation, BatchNorm, BatchNormConfig, Conv2d, Conv2dConfig, ConvTranspose2dConfig, Sequential, VarBuilder, }; use crate::models::dinov2::DinoVisionTransformer; pub struct DepthAnythingV2Config { out_channel_sizes: [usize; 4], in_channel_size: usize, // embed_dim in the Dino model num_features: usize, use_batch_norm: bool, use_class_token: bool, layer_ids_vits: Vec<usize>, input_image_size: usize, target_patch_size: usize, } impl DepthAnythingV2Config { #[allow(clippy::too_many_arguments)] pub fn new( out_channel_sizes: [usize; 4], in_channel_size: usize, num_features: usize, use_batch_norm: bool, use_class_token: bool, layer_ids_vits: Vec<usize>, input_image_size: usize, target_patch_size: usize, ) -> Self { Self { out_channel_sizes, in_channel_size, num_features, use_batch_norm, use_class_token, layer_ids_vits, input_image_size, target_patch_size, } } pub fn vit_small() -> Self { Self { out_channel_sizes: [48, 96, 192, 384], in_channel_size: 384, num_features: 64, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![2, 5, 8, 11], input_image_size: 518, target_patch_size: 518 / 14, } } pub fn vit_base() -> Self { Self { out_channel_sizes: [96, 192, 384, 768], in_channel_size: 768, num_features: 128, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![2, 5, 8, 11], input_image_size: 518, target_patch_size: 518 / 14, } } pub fn vit_large() -> Self { Self { out_channel_sizes: [256, 512, 1024, 1024], in_channel_size: 1024, num_features: 256, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![4, 11, 17, 23], input_image_size: 518, target_patch_size: 518 / 14, } } pub fn vit_giant() -> Self { Self { out_channel_sizes: [1536, 1536, 1536, 1536], in_channel_size: 1536, num_features: 384, use_batch_norm: false, use_class_token: false, layer_ids_vits: vec![9, 19, 29, 39], input_image_size: 518, target_patch_size: 518 / 14, } } } pub struct ResidualConvUnit { activation: Activation, conv1: Conv2d, conv2: Conv2d, batch_norm1: Option<BatchNorm>, batch_norm2: Option<BatchNorm>, } impl ResidualConvUnit { pub fn new( conf: &DepthAnythingV2Config, activation: Activation, vb: VarBuilder, ) -> Result<Self> { const KERNEL_SIZE: usize = 3; let conv_cfg = Conv2dConfig { padding: 1, stride: 1, dilation: 1, groups: 1, }; let conv1 = conv2d( conf.num_features, conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("conv1"), )?; let conv2 = conv2d( conf.num_features, conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("conv2"), )?; let (batch_norm1, batch_norm2) = match conf.use_batch_norm { true => { let batch_norm_cfg = BatchNormConfig { eps: 1e-05, remove_mean: false, affine: true, momentum: 0.1, }; ( Some(batch_norm(conf.num_features, batch_norm_cfg, vb.pp("bn1"))?), Some(batch_norm(conf.num_features, batch_norm_cfg, vb.pp("bn2"))?), ) } false => (None, None), }; Ok(Self { activation, conv1, conv2, batch_norm1, batch_norm2, }) } } impl Module for ResidualConvUnit { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let out = self.activation.forward(xs)?; let out = self.conv1.forward(&out)?; let out = if let Some(batch_norm1) = &self.batch_norm1 { batch_norm1.forward_train(&out)? 
} else { out }; let out = self.activation.forward(&out)?; let out = self.conv2.forward(&out)?; let out = if let Some(batch_norm2) = &self.batch_norm2 { batch_norm2.forward_train(&out)? } else { out }; out + xs } } pub struct FeatureFusionBlock { res_conv_unit1: ResidualConvUnit, res_conv_unit2: ResidualConvUnit, output_conv: Conv2d, target_patch_size: usize, } impl FeatureFusionBlock { pub fn new( conf: &DepthAnythingV2Config, target_patch_size: usize, activation: Activation, vb: VarBuilder, ) -> Result<Self> { const KERNEL_SIZE: usize = 1; let conv_cfg = Conv2dConfig { padding: 0, stride: 1, dilation: 1, groups: 1, }; let output_conv = conv2d( conf.num_features, conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("out_conv"), )?; let res_conv_unit1 = ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit1"))?; let res_conv_unit2 = ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit2"))?; Ok(Self { res_conv_unit1, res_conv_unit2, output_conv, target_patch_size, }) } } impl Module for FeatureFusionBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let out = self.res_conv_unit2.forward(xs)?; let out = out.interpolate2d(self.target_patch_size, self.target_patch_size)?; self.output_conv.forward(&out) } } pub struct Scratch { layer1_rn: Conv2d, layer2_rn: Conv2d, layer3_rn: Conv2d, layer4_rn: Conv2d, refine_net1: FeatureFusionBlock, refine_net2: FeatureFusionBlock, refine_net3: FeatureFusionBlock, refine_net4: FeatureFusionBlock, output_conv1: Conv2d, output_conv2: Sequential, } impl Scratch { pub fn new(conf: &DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> { const KERNEL_SIZE: usize = 3; let conv_cfg = Conv2dConfig { padding: 1, stride: 1, dilation: 1, groups: 1, }; let layer1_rn = conv2d_no_bias( conf.out_channel_sizes[0], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer1_rn"), )?; let layer2_rn = conv2d_no_bias( conf.out_channel_sizes[1], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer2_rn"), )?; let layer3_rn = conv2d_no_bias( conf.out_channel_sizes[2], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer3_rn"), )?; let layer4_rn = conv2d_no_bias( conf.out_channel_sizes[3], conf.num_features, KERNEL_SIZE, conv_cfg, vb.pp("layer4_rn"), )?; let refine_net1 = FeatureFusionBlock::new( conf, conf.target_patch_size * 8, Activation::Relu, vb.pp("refinenet1"), )?; let refine_net2 = FeatureFusionBlock::new( conf, conf.target_patch_size * 4, Activation::Relu, vb.pp("refinenet2"), )?; let refine_net3 = FeatureFusionBlock::new( conf, conf.target_patch_size * 2, Activation::Relu, vb.pp("refinenet3"), )?; let refine_net4 = FeatureFusionBlock::new( conf, conf.target_patch_size, Activation::Relu, vb.pp("refinenet4"), )?; let conv_cfg = Conv2dConfig { padding: 1, stride: 1, dilation: 1, groups: 1, }; let output_conv1 = conv2d( conf.num_features, conf.num_features / 2, KERNEL_SIZE, conv_cfg, vb.pp("output_conv1"), )?; let output_conv2 = seq(); const HEAD_FEATURES_2: usize = 32; const OUT_CHANNELS_2: usize = 1; const KERNEL_SIZE_2: usize = 1; let output_conv2 = output_conv2.add(conv2d( conf.num_features / 2, HEAD_FEATURES_2, KERNEL_SIZE, conv_cfg, vb.pp("output_conv2").pp("0"), )?); let output_conv2 = output_conv2 .add(Activation::Relu) .add(conv2d( HEAD_FEATURES_2, OUT_CHANNELS_2, KERNEL_SIZE_2, conv_cfg, vb.pp("output_conv2").pp("2"), )?) 
.add(Activation::Relu); Ok(Self { layer1_rn, layer2_rn, layer3_rn, layer4_rn, refine_net1, refine_net2, refine_net3, refine_net4, output_conv1, output_conv2, }) } } const NUM_CHANNELS: usize = 4; pub struct DPTHead { projections: Vec<Conv2d>, resize_layers: Vec<Box<dyn Module>>, readout_projections: Vec<Sequential>, scratch: Scratch, use_class_token: bool, input_image_size: usize, target_patch_size: usize, } impl DPTHead { pub fn new(conf: &DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> { let mut projections: Vec<Conv2d> = Vec::with_capacity(conf.out_channel_sizes.len()); for (conv_index, out_channel_size) in conf.out_channel_sizes.iter().enumerate() { projections.push(conv2d( conf.in_channel_size, *out_channel_size, 1, Default::default(), vb.pp("projects").pp(conv_index.to_string()), )?); } let resize_layers: Vec<Box<dyn Module>> = vec![ Box::new(conv_transpose2d( conf.out_channel_sizes[0], conf.out_channel_sizes[0], 4, ConvTranspose2dConfig { padding: 0, stride: 4, dilation: 1, output_padding: 0, }, vb.pp("resize_layers").pp("0"), )?), Box::new(conv_transpose2d( conf.out_channel_sizes[1], conf.out_channel_sizes[1], 2, ConvTranspose2dConfig { padding: 0, stride: 2, dilation: 1, output_padding: 0, }, vb.pp("resize_layers").pp("1"), )?), Box::new(Identity::new()), Box::new(conv2d( conf.out_channel_sizes[3], conf.out_channel_sizes[3], 3, Conv2dConfig { padding: 1, stride: 2, dilation: 1, groups: 1, }, vb.pp("resize_layers").pp("3"), )?), ]; let readout_projections = if conf.use_class_token { let rop = Vec::with_capacity(NUM_CHANNELS); for rop_index in 0..NUM_CHANNELS { seq() .add(linear( 2 * conf.in_channel_size, conf.in_channel_size, vb.pp("readout_projects").pp(rop_index.to_string()), )?) .add(Activation::Gelu); } rop } else { vec![] }; let scratch = Scratch::new(conf, vb.pp("scratch"))?; Ok(Self { projections, resize_layers, readout_projections, scratch, use_class_token: conf.use_class_token, input_image_size: conf.input_image_size, target_patch_size: conf.target_patch_size, }) } } impl Module for DPTHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut out: Vec<Tensor> = Vec::with_capacity(NUM_CHANNELS); for i in 0..NUM_CHANNELS { let x = if self.use_class_token { let x = xs.get(i)?.get(0)?; let class_token = xs.get(i)?.get(1)?; let readout = class_token.unsqueeze(1)?.expand(x.shape())?; let to_cat = [x, readout]; let cat = Tensor::cat(&to_cat, Minus1)?; self.readout_projections[i].forward(&cat)? } else { xs.get(i)? 
}; let x_dims = x.dims(); let x = x.permute((0, 2, 1))?.reshape(( x_dims[0], x_dims[x_dims.len() - 1], self.target_patch_size, self.target_patch_size, ))?; let x = self.projections[i].forward(&x)?; let x = self.resize_layers[i].forward(&x)?; out.push(x); } let layer_1_rn = self.scratch.layer1_rn.forward(&out[0])?; let layer_2_rn = self.scratch.layer2_rn.forward(&out[1])?; let layer_3_rn = self.scratch.layer3_rn.forward(&out[2])?; let layer_4_rn = self.scratch.layer4_rn.forward(&out[3])?; let path4 = self.scratch.refine_net4.forward(&layer_4_rn)?; let res3_out = self .scratch .refine_net3 .res_conv_unit1 .forward(&layer_3_rn)?; let res3_out = path4.add(&res3_out)?; let path3 = self.scratch.refine_net3.forward(&res3_out)?; let res2_out = self .scratch .refine_net2 .res_conv_unit1 .forward(&layer_2_rn)?; let res2_out = path3.add(&res2_out)?; let path2 = self.scratch.refine_net2.forward(&res2_out)?; let res1_out = self .scratch .refine_net1 .res_conv_unit1 .forward(&layer_1_rn)?; let res1_out = path2.add(&res1_out)?; let path1 = self.scratch.refine_net1.forward(&res1_out)?; let out = self.scratch.output_conv1.forward(&path1)?; let out = out.interpolate2d(self.input_image_size, self.input_image_size)?; self.scratch.output_conv2.forward(&out) } } pub struct DepthAnythingV2 { pretrained: Arc<DinoVisionTransformer>, depth_head: DPTHead, conf: DepthAnythingV2Config, } impl DepthAnythingV2 { pub fn new( pretrained: Arc<DinoVisionTransformer>, conf: DepthAnythingV2Config, vb: VarBuilder, ) -> Result<Self> { let depth_head = DPTHead::new(&conf, vb.pp("depth_head"))?; Ok(Self { pretrained, depth_head, conf, }) } } impl Module for DepthAnythingV2 { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let features = self.pretrained.get_intermediate_layers( xs, &self.conf.layer_ids_vits, false, false, true, )?; let depth = self.depth_head.forward(&features)?; depth.relu() } }
candle/candle-transformers/src/models/depth_anything_v2.rs/0
{ "file_path": "candle/candle-transformers/src/models/depth_anything_v2.rs", "repo_id": "candle", "token_count": 9140 }
// Copyright (c) Kyutai, all rights reserved. // This source code is licensed under the license found in the // LICENSE file in the root directory of this source tree. use super::{conv, quantization, seanet, transformer}; use candle::{DType, Device, Module, Result, StreamTensor, StreamingModule, Tensor}; use candle_nn::VarBuilder; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ResampleMethod { Conv, Interpolate, } #[derive(Debug, Clone)] pub struct Config { pub channels: usize, pub sample_rate: f64, pub frame_rate: f64, pub renormalize: bool, pub resample_method: ResampleMethod, pub seanet: seanet::Config, pub transformer: transformer::Config, pub quantizer_n_q: usize, pub quantizer_bins: usize, pub quantizer_dim: usize, } impl Config { // /lustre/scwpod02/client/kyutai/alex/mimi_exp/xps/b7d2bd5a/.hydra/config.yaml pub fn v0_1(num_codebooks: Option<usize>) -> Self { let seanet_cfg = seanet::Config { dimension: 512, channels: 1, causal: true, n_filters: 64, n_residual_layers: 1, activation: candle_nn::Activation::Elu(1.), compress: 2, dilation_base: 2, disable_norm_outer_blocks: 0, final_activation: None, kernel_size: 7, residual_kernel_size: 3, last_kernel_size: 3, lstm: 0, norm: conv::Norm::WeightNorm, pad_mode: conv::PadMode::Constant, ratios: vec![8, 6, 5, 4], true_skip: true, }; let transformer_cfg = transformer::Config { d_model: seanet_cfg.dimension, num_heads: 8, num_layers: 8, causal: true, norm_first: true, bias_ff: false, bias_attn: false, layer_scale: Some(0.01), context: 250, conv_kernel_size: 5, use_conv_bias: true, use_conv_block: false, cross_attention: false, max_period: 10000, gating: None, norm: super::NormType::LayerNorm, positional_embedding: transformer::PositionalEmbedding::Rope, dim_feedforward: 2048, kv_repeat: 1, conv_layout: true, // see builders.py max_seq_len: 8192, // the transformer works at 25hz so this is ~5 mins. }; Config { channels: 1, sample_rate: 24_000., frame_rate: 12.5, renormalize: true, resample_method: ResampleMethod::Conv, seanet: seanet_cfg, transformer: transformer_cfg, quantizer_n_q: num_codebooks.unwrap_or(16), quantizer_bins: 2048, quantizer_dim: 256, } } } #[derive(Debug, Clone)] pub struct Encodec { encoder: seanet::SeaNetEncoder, decoder: seanet::SeaNetDecoder, encoder_transformer: transformer::ProjectedTransformer, decoder_transformer: transformer::ProjectedTransformer, downsample: conv::ConvDownsample1d, upsample: conv::ConvTrUpsample1d, quantizer: quantization::SplitResidualVectorQuantizer, config: Config, } impl Encodec { pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> { let dim = cfg.seanet.dimension; let encoder = seanet::SeaNetEncoder::new(&cfg.seanet, vb.pp("encoder"))?; let decoder = seanet::SeaNetDecoder::new(&cfg.seanet, vb.pp("decoder"))?; let encoder_transformer = transformer::ProjectedTransformer::new( dim, &[dim], &cfg.transformer, vb.pp("encoder_transformer"), )?; let decoder_transformer = transformer::ProjectedTransformer::new( dim, &[dim], &cfg.transformer, vb.pp("decoder_transformer"), )?; let quantizer = quantization::SplitResidualVectorQuantizer::new( /* dim */ cfg.quantizer_dim, /* input_dim */ Some(dim), /* output_dim */ Some(dim), /* n_q */ cfg.quantizer_n_q, /* bins */ cfg.quantizer_bins, vb.pp("quantizer"), )?; let encoder_frame_rate = cfg.sample_rate / cfg.seanet.ratios.iter().product::<usize>() as f64; let downsample_stride = (encoder_frame_rate / cfg.frame_rate) as usize; // `upsample` and `downsample` only apply if frame_rate is different from encoder_frame_rate. 
let downsample = conv::ConvDownsample1d::new( /* stride */ downsample_stride, /* dim */ dim, /* causal */ true, /* learnt */ true, vb.pp("downsample"), )?; let upsample = conv::ConvTrUpsample1d::new( /* stride */ downsample_stride, /* dim */ dim, /* causal */ true, /* learnt */ true, vb.pp("upsample"), )?; Ok(Self { encoder, decoder, encoder_transformer, decoder_transformer, quantizer, downsample, upsample, config: cfg, }) } pub fn config(&self) -> &Config { &self.config } pub fn encode_pre_quantize(&mut self, xs: &Tensor) -> Result<Tensor> { let xs = self.encoder.forward(xs)?; self.encoder_transformer.reset_state(); let xs = self.encoder_transformer.forward(&xs)?; let xs = &xs[0]; xs.apply(&self.downsample) } pub fn encode(&mut self, xs: &Tensor) -> Result<Tensor> { let xs = self.encoder.forward(xs)?; self.encoder_transformer.reset_state(); let xs = self.encoder_transformer.forward(&xs)?; let xs = &xs[0]; let xs = xs.apply(&self.downsample)?; let codes = self.quantizer.encode(&xs)?; Ok(codes) } pub fn encode_step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { let xs = self.encoder.step(xs)?; let xs = self.encoder_transformer.step(&xs)?; let xs = self.downsample.step(&xs)?; match xs.as_option() { None => Ok(().into()), Some(xs) => { let codes = self.quantizer.encode(xs)?; Ok(codes.into()) } } } pub fn decode(&mut self, codes: &Tensor) -> Result<Tensor> { let emb = self.quantizer.decode(codes)?; let emb = emb.apply(&self.upsample)?; self.decoder_transformer.reset_state(); let outs = self.decoder_transformer.forward(&emb)?; let out = &outs[0]; self.decoder.forward(out) } pub fn decode_step(&mut self, codes: &StreamTensor) -> Result<StreamTensor> { let emb = match codes.as_option() { Some(codes) => StreamTensor::from_tensor(self.quantizer.decode(codes)?), None => StreamTensor::empty(), }; let emb = self.upsample.step(&emb)?; let out = self.decoder_transformer.step(&emb)?; self.decoder.step(&out) } pub fn reset_state(&mut self) { self.encoder.reset_state(); self.encoder_transformer.reset_state(); self.decoder.reset_state(); self.decoder_transformer.reset_state(); self.upsample.reset_state(); } } pub fn load(model_file: &str, num_codebooks: Option<usize>, dev: &Device) -> Result<Encodec> { let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, dev)? }; let cfg = Config::v0_1(num_codebooks); let encodec = Encodec::new(cfg, vb)?; Ok(encodec) }
candle/candle-transformers/src/models/mimi/encodec.rs/0
{ "file_path": "candle/candle-transformers/src/models/mimi/encodec.rs", "repo_id": "candle", "token_count": 3773 }
//! Candle implementations for various deep learning models //! //! This crate provides implementations of popular machine learning models and architectures for different modalities. //! //! - Large language models: [`llama`], [`phi3`], [`mamba`], [`mixtral`], [`bert`], ... //! - Text to text models: [`t5`], ... //! - Image to text models: [`blip`], ... //! - Text to image models: [`stable_diffusion`] and [`wuerstchen`], ... //! - Audio models: [`whisper`], [`encodec`], [`metavoice`], [`parler_tts`], ... //! - Computer vision models: [`dinov2`], [`convmixer`], [`efficientnet`], ... //! //! Some of the models also have quantized variants, e.g. [`quantized_blip`], [`quantized_llama`] and [`quantized_qwen2`]. //! //! The implementations aim to be readable while maintaining good performance. For more information //! on each model see the model's module docs in the links below. pub mod based; pub mod beit; pub mod bert; pub mod bigcode; pub mod blip; pub mod blip_text; pub mod chatglm; pub mod chinese_clip; pub mod clip; pub mod codegeex4_9b; pub mod colpali; pub mod convmixer; pub mod convnext; pub mod dac; pub mod debertav2; pub mod depth_anything_v2; pub mod dinov2; pub mod dinov2reg4; pub mod distilbert; pub mod efficientnet; pub mod efficientvit; pub mod encodec; pub mod eva2; pub mod falcon; pub mod fastvit; pub mod flux; pub mod gemma; pub mod gemma2; pub mod glm4; pub mod granite; pub mod helium; pub mod hiera; pub mod jina_bert; pub mod llama; pub mod llama2_c; pub mod llama2_c_weights; pub mod llava; pub mod mamba; pub mod marian; pub mod metavoice; pub mod mimi; pub mod mistral; pub mod mixformer; pub mod mixtral; pub mod mmdit; pub mod mobileclip; pub mod mobilenetv4; pub mod mobileone; pub mod modernbert; pub mod moondream; pub mod mpt; pub mod nvembed_v2; pub mod olmo; pub mod openclip; pub mod paligemma; pub mod parler_tts; pub mod persimmon; pub mod phi; pub mod phi3; pub mod pixtral; pub mod quantized_blip; pub mod quantized_blip_text; pub mod quantized_llama; pub mod quantized_llama2_c; pub mod quantized_metavoice; pub mod quantized_mistral; pub mod quantized_mixformer; pub mod quantized_moondream; pub mod quantized_mpt; pub mod quantized_phi; pub mod quantized_phi3; pub mod quantized_qwen2; pub mod quantized_recurrent_gemma; pub mod quantized_rwkv_v5; pub mod quantized_rwkv_v6; pub mod quantized_stable_lm; pub mod quantized_t5; pub mod qwen2; pub mod qwen2_moe; pub mod recurrent_gemma; pub mod repvgg; pub mod resnet; pub mod rwkv_v5; pub mod rwkv_v6; pub mod segformer; pub mod segment_anything; pub mod siglip; pub mod stable_diffusion; pub mod stable_lm; pub mod starcoder2; pub mod stella_en_v5; pub mod t5; pub mod trocr; pub mod vgg; pub mod vit; pub mod whisper; pub mod with_tracing; pub mod wuerstchen; pub mod xlm_roberta; pub mod yi;
candle/candle-transformers/src/models/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/mod.rs", "repo_id": "candle", "token_count": 1059 }
//! Pixtral Language-Image Pre-Training //! //! Pixtral is an architecture trained for multimodal learning //! using images paired with text descriptions. //! //! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/tree/main/src/transformers/models/pixtral) //! - 📝 [Blog Post](https://mistral.ai/news/pixtral-12b/) //! - 🤗 [HF Model Card](https://huggingface.co/mistralai/Pixtral-12B-2409) //! - 🤗 [HF Community Model Card](https://huggingface.co/mistral-community/pixtral-12b) //! //! # Example //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/flux/assets/flux-robot.jpg" alt="" width=320> //! </div> //! //! ```bash //! cargo run --profile=release-with-debug \ //! --features cuda \ //! --example pixtral -- \ //! --image candle-examples/examples/flux/assets/flux-robot.jpg //! ``` //! //! ```txt //! Describe the image. //! //! The image depicts a charming, rustic robot standing on a sandy beach at sunset. //! The robot has a vintage, steampunk aesthetic with visible gears and mechanical //! parts. It is holding a small lantern in one hand, which emits a warm glow, and //! its other arm is extended forward as if reaching out or guiding the way. The //! robot's body is adorned with the word "RUST" in bright orange letters, adding to //! its rustic theme. //! //! The background features a dramatic sky filled with clouds, illuminated by the //! setting sun, casting a golden hue over the scene. Gentle waves lap against the //! shore, creating a serene and picturesque atmosphere. The overall mood of the //! image is whimsical and nostalgic, evoking a sense of adventure and tranquility. //! ``` pub mod llava; pub mod vision_model; pub use llava::{Config, Model};
candle/candle-transformers/src/models/pixtral/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/pixtral/mod.rs", "repo_id": "candle", "token_count": 578 }
// Adapted from: // https://github.com/ChaoningZhang/MobileSAM/blob/master/mobile_sam/modeling/tiny_vit_sam.py use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{Conv2dConfig, Module, VarBuilder}; const MBCONV_EXPAND_RATIO: usize = 4; const MLP_RATIO: usize = 4; const LOCAL_CONV_SIZE: usize = 3; const IMG_SIZE: usize = 1024; const IN_CHANNELS: usize = 3; #[derive(Debug)] struct Conv2dBN { c: candle_nn::Conv2d, bn: candle_nn::BatchNorm, span: tracing::Span, } impl Conv2dBN { fn new(in_: usize, out: usize, ks: usize, cfg: Conv2dConfig, vb: VarBuilder) -> Result<Self> { let c = candle_nn::conv2d_no_bias(in_, out, ks, cfg, vb.pp("c"))?; let bn = candle_nn::batch_norm(out, 1e-5, vb.pp("bn"))?; let span = tracing::span!(tracing::Level::TRACE, "conv2d-bn"); Ok(Self { c, bn, span }) } } impl Module for Conv2dBN { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.c)?.apply_t(&self.bn, false) } } #[derive(Debug)] struct PatchEmbed { conv1: Conv2dBN, conv2: Conv2dBN, span: tracing::Span, } impl PatchEmbed { fn new(in_chans: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { stride: 2, padding: 1, ..Default::default() }; let conv1 = Conv2dBN::new(in_chans, embed_dim / 2, 3, cfg, vb.pp("seq.0"))?; let conv2 = Conv2dBN::new(embed_dim / 2, embed_dim, 3, cfg, vb.pp("seq.2"))?; let span = tracing::span!(tracing::Level::TRACE, "patch-embed"); Ok(Self { conv1, conv2, span }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.conv1)?.gelu()?.apply(&self.conv2) } } #[derive(Debug)] struct MBConv { conv1: Conv2dBN, conv2: Conv2dBN, conv3: Conv2dBN, span: tracing::Span, } impl MBConv { fn new(in_: usize, out: usize, expand_ratio: usize, vb: VarBuilder) -> Result<Self> { let hidden = in_ * expand_ratio; let cfg2 = candle_nn::Conv2dConfig { padding: 1, groups: hidden, ..Default::default() }; let conv1 = Conv2dBN::new(in_, hidden, 1, Default::default(), vb.pp("conv1"))?; let conv2 = Conv2dBN::new(hidden, hidden, 3, cfg2, vb.pp("conv2"))?; let conv3 = Conv2dBN::new(hidden, out, 1, Default::default(), vb.pp("conv3"))?; let span = tracing::span!(tracing::Level::TRACE, "mb-conv"); Ok(Self { conv1, conv2, conv3, span, }) } } impl Module for MBConv { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let shortcut = xs; let xs = xs .apply(&self.conv1)? .gelu()? .apply(&self.conv2)? .gelu()? 
.apply(&self.conv3)?; (xs + shortcut)?.gelu() } } #[derive(Debug)] struct PatchMerging { conv1: Conv2dBN, conv2: Conv2dBN, conv3: Conv2dBN, input_resolution: (usize, usize), span: tracing::Span, } impl PatchMerging { fn new( input_resolution: (usize, usize), dim: usize, out: usize, vb: VarBuilder, ) -> Result<Self> { let stride = if [320, 448, 576].contains(&out) { 1 } else { 2 }; let cfg2 = candle_nn::Conv2dConfig { padding: 1, stride, groups: out, ..Default::default() }; let conv1 = Conv2dBN::new(dim, out, 1, Default::default(), vb.pp("conv1"))?; let conv2 = Conv2dBN::new(out, out, 3, cfg2, vb.pp("conv2"))?; let conv3 = Conv2dBN::new(out, out, 1, Default::default(), vb.pp("conv3"))?; let span = tracing::span!(tracing::Level::TRACE, "patch-merging"); Ok(Self { conv1, conv2, conv3, input_resolution, span, }) } } impl Module for PatchMerging { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = if xs.rank() == 3 { let (h, w) = self.input_resolution; let b = xs.dim(0)?; xs.reshape((b, h, w, ()))?.permute((0, 3, 1, 2))? } else { xs.clone() }; xs.apply(&self.conv1)? .gelu()? .apply(&self.conv2)? .gelu()? .apply(&self.conv3)? .flatten_from(2)? .transpose(1, 2) } } #[derive(Debug)] struct ConvLayer { blocks: Vec<MBConv>, downsample: Option<PatchMerging>, span: tracing::Span, } impl ConvLayer { fn new( dim: usize, out: usize, input_resolution: (usize, usize), depth: usize, downsample: bool, conv_expand_ratio: usize, vb: VarBuilder, ) -> Result<Self> { let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let block = MBConv::new(dim, dim, conv_expand_ratio, vb_b.pp(index))?; blocks.push(block) } let downsample = if downsample { let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "conv-layer"); Ok(Self { blocks, downsample, span, }) } } impl Module for ConvLayer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for block in self.blocks.iter() { xs = block.forward(&xs)? } match &self.downsample { None => Ok(xs), Some(downsample) => downsample.forward(&xs), } } } #[derive(Debug)] struct Mlp { norm: candle_nn::LayerNorm, fc1: super::Linear, fc2: super::Linear, span: tracing::Span, } impl Mlp { fn new(in_: usize, hidden: usize, vb: VarBuilder) -> Result<Self> { let norm = candle_nn::layer_norm(in_, 1e-5, vb.pp("norm"))?; let fc1 = super::linear(vb.pp("fc1"), in_, hidden, true)?; let fc2 = super::linear(vb.pp("fc2"), hidden, in_, true)?; let span = tracing::span!(tracing::Level::TRACE, "mlp"); Ok(Self { norm, fc1, fc2, span, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.norm)? .apply(&self.fc1)? .gelu()? 
.apply(&self.fc2) } } #[derive(Debug)] struct Attention { norm: candle_nn::LayerNorm, qkv: super::Linear, proj: super::Linear, ab: Tensor, key_dim: usize, num_heads: usize, d: usize, dh: usize, scale: f64, span: tracing::Span, span_matmul: tracing::Span, span_softmax: tracing::Span, } impl Attention { fn new( dim: usize, key_dim: usize, num_heads: usize, attn_ratio: usize, resolution: (usize, usize), vb: VarBuilder, ) -> Result<Self> { let d = attn_ratio * key_dim; let dh = d * num_heads; let nh_kd = key_dim * num_heads; let h = dh + nh_kd * 2; let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?; let qkv = super::linear(vb.pp("qkv"), dim, h, true)?; let proj = super::linear(vb.pp("proj"), dh, dim, true)?; let points = (0..resolution.0) .flat_map(|x| (0..resolution.1).map(move |y| (x as i64, y as i64))) .collect::<Vec<_>>(); let mut idxs = Vec::with_capacity(points.len() * points.len()); let mut attention_offsets = std::collections::HashMap::new(); for &(x1, y1) in points.iter() { for &(x2, y2) in points.iter() { let offset = ((x2 - x1).abs(), (y2 - y1).abs()); let l = attention_offsets.len(); let idx = attention_offsets.entry(offset).or_insert(l); idxs.push(*idx as u32) } } let attention_biases = vb.get((num_heads, attention_offsets.len()), "attention_biases")?; let idxs = Tensor::new(idxs, attention_biases.device())?; let ab = attention_biases .index_select(&idxs, 1)? .reshape(((), points.len(), points.len()))?; let span = tracing::span!(tracing::Level::TRACE, "attention"); let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul"); let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm"); Ok(Self { norm, qkv, proj, ab, key_dim, num_heads, d, dh, scale: 1f64 / (key_dim as f64).sqrt(), span, span_matmul, span_softmax, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b, n, _) = xs.dims3()?; let xs = xs.apply(&self.norm)?; let qkv = xs.apply(&self.qkv)?.reshape((b, n, self.num_heads, ()))?; let q = qkv .narrow(D::Minus1, 0, self.key_dim)? .permute((0, 2, 1, 3))? .contiguous()?; let k = qkv .narrow(D::Minus1, self.key_dim, self.key_dim)? .permute((0, 2, 1, 3))? .contiguous()?; let v = qkv .narrow(D::Minus1, 2 * self.key_dim, self.d)? .permute((0, 2, 1, 3))? .contiguous()?; let attn = { let _enter = self.span_matmul.enter(); (q.matmul(&k.t()?)? * self.scale)? }; let attn = attn.broadcast_add(&self.ab)?; let attn = { let _enter = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attn)? }; let attn = { let _enter = self.span_matmul.enter(); attn.matmul(&v)? }; attn.transpose(1, 2)? .reshape((b, n, self.dh))? 
.apply(&self.proj) } } #[derive(Debug)] struct TinyViTBlock { attn: Attention, local_conv: Conv2dBN, mlp: Mlp, window_size: usize, input_resolution: (usize, usize), span: tracing::Span, } impl TinyViTBlock { fn new( dim: usize, input_resolution: (usize, usize), num_heads: usize, window_size: usize, vb: VarBuilder, ) -> Result<Self> { let head_dim = dim / num_heads; let attn = Attention::new( dim, head_dim, num_heads, 1, (window_size, window_size), vb.pp("attn"), )?; let mlp = Mlp::new(dim, dim * MLP_RATIO, vb.pp("mlp"))?; let cfg = candle_nn::Conv2dConfig { padding: LOCAL_CONV_SIZE / 2, groups: dim, ..Default::default() }; let local_conv = Conv2dBN::new(dim, dim, LOCAL_CONV_SIZE, cfg, vb.pp("local_conv"))?; let span = tracing::span!(tracing::Level::TRACE, "attention"); Ok(Self { attn, local_conv, mlp, window_size, input_resolution, span, }) } } impl Module for TinyViTBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (h, w) = self.input_resolution; let (b, l, c) = xs.dims3()?; let res_x = xs; let xs = if h == self.window_size && w == self.window_size { self.attn.forward(xs)? } else { let xs = xs.reshape((b, h, w, c))?; let pad_b = (self.window_size - h % self.window_size) % self.window_size; let pad_r = (self.window_size - w % self.window_size) % self.window_size; let xs = if pad_b > 0 { xs.pad_with_zeros(1, 0, pad_b)? } else { xs }; let xs = if pad_r > 0 { xs.pad_with_zeros(2, 0, pad_r)? } else { xs }; let (p_h, p_w) = (h + pad_b, w + pad_r); let n_h = p_h / self.window_size; let n_w = p_w / self.window_size; let xs = xs .reshape((b, n_h, self.window_size, n_w, self.window_size, c))? .transpose(2, 3)? .reshape((b * n_h * n_w, self.window_size * self.window_size, c))?; let xs = self.attn.forward(&xs)?; let xs = xs .reshape((b, n_h, n_w, self.window_size, self.window_size, c))? .transpose(2, 3)? .reshape((b, p_h, p_w, c))?; let xs = if pad_r > 0 { xs.i((.., .., ..w))?.contiguous()? } else { xs }; let xs = if pad_b > 0 { xs.i((.., ..h, ..))?.contiguous()? } else { xs }; xs.reshape((b, l, c))? }; let xs = (xs + res_x)?; let xs = xs .transpose(1, 2)? .reshape((b, c, h, w))? .apply(&self.local_conv)? .reshape((b, c, l))? .transpose(1, 2)?; &xs + self.mlp.forward(&xs)? } } #[derive(Debug)] struct BasicLayer { blocks: Vec<TinyViTBlock>, downsample: Option<PatchMerging>, span: tracing::Span, } impl BasicLayer { #[allow(clippy::too_many_arguments)] fn new( dim: usize, input_resolution: (usize, usize), depth: usize, num_heads: usize, window_size: usize, downsample: bool, out: usize, vb: VarBuilder, ) -> Result<Self> { let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let block = TinyViTBlock::new( dim, input_resolution, num_heads, window_size, vb_b.pp(index), )?; blocks.push(block) } let downsample = if downsample { let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?; Some(downsample) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "basic-layer"); Ok(Self { blocks, downsample, span, }) } } impl Module for BasicLayer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.clone(); for block in self.blocks.iter() { xs = block.forward(&xs)? 
} match &self.downsample { None => Ok(xs), Some(downsample) => downsample.forward(&xs), } } } #[derive(Debug)] pub struct TinyViT { patch_embed: PatchEmbed, layer0: ConvLayer, layers: Vec<BasicLayer>, // norm_head: candle_nn::LayerNorm, // head: candle_nn::Linear, neck_conv1: candle_nn::Conv2d, neck_ln1: super::LayerNorm2d, neck_conv2: candle_nn::Conv2d, neck_ln2: super::LayerNorm2d, span: tracing::Span, span_neck: tracing::Span, } impl TinyViT { pub fn new( embed_dims: &[usize], depths: &[usize], num_heads: &[usize], window_sizes: &[usize], _num_classes: usize, vb: VarBuilder, ) -> Result<Self> { let patch_embed = PatchEmbed::new(IN_CHANNELS, embed_dims[0], vb.pp("patch_embed"))?; let patches_resolution = IMG_SIZE / 4; let vb_l = vb.pp("layers"); let layer0 = ConvLayer::new( /* dim */ embed_dims[0], /* out */ embed_dims[1], /* input_resolution */ (patches_resolution, patches_resolution), /* depth */ depths[0], /* downsample */ true, /* conv_expand_ratio */ MBCONV_EXPAND_RATIO, vb_l.pp(0), )?; let num_layers = embed_dims.len(); let mut layers = Vec::with_capacity(num_layers - 1); for i_layer in 1..num_layers { let patches_resolution = patches_resolution / (1 << usize::min(i_layer, 2)); let layer = BasicLayer::new( /* dim */ embed_dims[i_layer], /* input_resolution */ (patches_resolution, patches_resolution), /* depth */ depths[i_layer], /* num_heads */ num_heads[i_layer], /* window_size */ window_sizes[i_layer], /* downsample */ i_layer < num_layers - 1, /* out */ embed_dims[usize::min(i_layer + 1, num_layers - 1)], vb_l.pp(i_layer), )?; layers.push(layer) } let last_embed_dim = embed_dims[embed_dims.len() - 1]; // let norm_head = candle_nn::layer_norm(last_embed_dim, 1e-5, vb.pp("norm_head"))?; // let head = candle_nn::linear(last_embed_dim, num_classes, vb.pp("head"))?; let neck_conv1 = candle_nn::conv2d_no_bias(last_embed_dim, 256, 1, Default::default(), vb.pp("neck.0"))?; let neck_ln1 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.1"))?; let cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let neck_conv2 = candle_nn::conv2d_no_bias(256, 256, 3, cfg, vb.pp("neck.2"))?; let neck_ln2 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.3"))?; let span = tracing::span!(tracing::Level::TRACE, "tiny-vit"); let span_neck = tracing::span!(tracing::Level::TRACE, "neck"); Ok(Self { patch_embed, layer0, layers, neck_conv1, neck_ln1, neck_conv2, neck_ln2, span, span_neck, }) } } impl Module for TinyViT { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.patch_embed.forward(xs)?; let mut xs = self.layer0.forward(&xs)?; for layer in self.layers.iter() { xs = layer.forward(&xs)? } let (b, _, c) = xs.dims3()?; let _enter = self.span_neck.enter(); xs.reshape((b, 64, 64, c))? .permute((0, 3, 1, 2))? .apply(&self.neck_conv1)? .apply(&self.neck_ln1)? .apply(&self.neck_conv2)? .apply(&self.neck_ln2) } } pub fn tiny_vit_5m(vb: VarBuilder) -> Result<TinyViT> { TinyViT::new( /* embed_dims */ &[64, 128, 160, 320], /* depths */ &[2, 2, 6, 2], /* num_heads */ &[2, 4, 5, 10], /* window_sizes */ &[7, 7, 14, 7], /* num_classes */ 1000, vb, ) }
candle/candle-transformers/src/models/segment_anything/tiny_vit.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/tiny_vit.rs", "repo_id": "candle", "token_count": 10372 }
#![allow(dead_code)] //! # Variational Auto-Encoder (VAE) Models. //! //! Auto-encoder models compress their input to a usually smaller latent space //! before expanding it back to its original shape. This results in the latent values //! compressing the original information. use super::unet_2d_blocks::{ DownEncoderBlock2D, DownEncoderBlock2DConfig, UNetMidBlock2D, UNetMidBlock2DConfig, UpDecoderBlock2D, UpDecoderBlock2DConfig, }; use candle::{Result, Tensor}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone)] struct EncoderConfig { // down_block_types: DownEncoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, double_z: bool, } impl Default for EncoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, double_z: true, } } } #[derive(Debug)] struct Encoder { conv_in: nn::Conv2d, down_blocks: Vec<DownEncoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: EncoderConfig, } impl Encoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: EncoderConfig, ) -> Result<Self> { let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, config.block_out_channels[0], 3, conv_cfg, vs.pp("conv_in"), )?; let mut down_blocks = vec![]; let vs_down_blocks = vs.pp("down_blocks"); for index in 0..config.block_out_channels.len() { let out_channels = config.block_out_channels[index]; let in_channels = if index > 0 { config.block_out_channels[index - 1] } else { config.block_out_channels[0] }; let is_final = index + 1 == config.block_out_channels.len(); let cfg = DownEncoderBlock2DConfig { num_layers: config.layers_per_block, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_downsample: !is_final, downsample_padding: 0, ..Default::default() }; let down_block = DownEncoderBlock2D::new( vs_down_blocks.pp(index.to_string()), in_channels, out_channels, cfg, )?; down_blocks.push(down_block) } let last_block_out_channels = *config.block_out_channels.last().unwrap(); let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let conv_norm_out = nn::group_norm( config.norm_num_groups, last_block_out_channels, 1e-6, vs.pp("conv_norm_out"), )?; let conv_out_channels = if config.double_z { 2 * out_channels } else { out_channels }; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( last_block_out_channels, conv_out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, down_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.conv_in)?; for down_block in self.down_blocks.iter() { xs = xs.apply(down_block)? } let xs = self .mid_block .forward(&xs, None)? 
.apply(&self.conv_norm_out)?; nn::ops::silu(&xs)?.apply(&self.conv_out) } } #[derive(Debug, Clone)] struct DecoderConfig { // up_block_types: UpDecoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, } impl Default for DecoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, } } } #[derive(Debug)] struct Decoder { conv_in: nn::Conv2d, up_blocks: Vec<UpDecoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: DecoderConfig, } impl Decoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: DecoderConfig, ) -> Result<Self> { let n_block_out_channels = config.block_out_channels.len(); let last_block_out_channels = *config.block_out_channels.last().unwrap(); let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, last_block_out_channels, 3, conv_cfg, vs.pp("conv_in"), )?; let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let mut up_blocks = vec![]; let vs_up_blocks = vs.pp("up_blocks"); let reversed_block_out_channels: Vec<_> = config.block_out_channels.iter().copied().rev().collect(); for index in 0..n_block_out_channels { let out_channels = reversed_block_out_channels[index]; let in_channels = if index > 0 { reversed_block_out_channels[index - 1] } else { reversed_block_out_channels[0] }; let is_final = index + 1 == n_block_out_channels; let cfg = UpDecoderBlock2DConfig { num_layers: config.layers_per_block + 1, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_upsample: !is_final, ..Default::default() }; let up_block = UpDecoderBlock2D::new( vs_up_blocks.pp(index.to_string()), in_channels, out_channels, cfg, )?; up_blocks.push(up_block) } let conv_norm_out = nn::group_norm( config.norm_num_groups, config.block_out_channels[0], 1e-6, vs.pp("conv_norm_out"), )?; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( config.block_out_channels[0], out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, up_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.mid_block.forward(&self.conv_in.forward(xs)?, None)?; for up_block in self.up_blocks.iter() { xs = up_block.forward(&xs)? 
} let xs = self.conv_norm_out.forward(&xs)?; let xs = nn::ops::silu(&xs)?; self.conv_out.forward(&xs) } } #[derive(Debug, Clone)] pub struct AutoEncoderKLConfig { pub block_out_channels: Vec<usize>, pub layers_per_block: usize, pub latent_channels: usize, pub norm_num_groups: usize, pub use_quant_conv: bool, pub use_post_quant_conv: bool, } impl Default for AutoEncoderKLConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 1, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, } } } pub struct DiagonalGaussianDistribution { mean: Tensor, std: Tensor, } impl DiagonalGaussianDistribution { pub fn new(parameters: &Tensor) -> Result<Self> { let mut parameters = parameters.chunk(2, 1)?.into_iter(); let mean = parameters.next().unwrap(); let logvar = parameters.next().unwrap(); let std = (logvar * 0.5)?.exp()?; Ok(DiagonalGaussianDistribution { mean, std }) } pub fn sample(&self) -> Result<Tensor> { let sample = self.mean.randn_like(0., 1.); &self.mean + &self.std * sample } } // https://github.com/huggingface/diffusers/blob/970e30606c2944e3286f56e8eb6d3dc6d1eb85f7/src/diffusers/models/vae.py#L485 // This implementation is specific to the config used in stable-diffusion-v1-5 // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json #[derive(Debug)] pub struct AutoEncoderKL { encoder: Encoder, decoder: Decoder, quant_conv: Option<nn::Conv2d>, post_quant_conv: Option<nn::Conv2d>, pub config: AutoEncoderKLConfig, } impl AutoEncoderKL { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: AutoEncoderKLConfig, ) -> Result<Self> { let latent_channels = config.latent_channels; let encoder_cfg = EncoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, double_z: true, }; let encoder = Encoder::new(vs.pp("encoder"), in_channels, latent_channels, encoder_cfg)?; let decoder_cfg = DecoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, }; let decoder = Decoder::new(vs.pp("decoder"), latent_channels, out_channels, decoder_cfg)?; let conv_cfg = Default::default(); let quant_conv = { if config.use_quant_conv { Some(nn::conv2d( 2 * latent_channels, 2 * latent_channels, 1, conv_cfg, vs.pp("quant_conv"), )?) } else { None } }; let post_quant_conv = { if config.use_post_quant_conv { Some(nn::conv2d( latent_channels, latent_channels, 1, conv_cfg, vs.pp("post_quant_conv"), )?) } else { None } }; Ok(Self { encoder, decoder, quant_conv, post_quant_conv, config, }) } /// Returns the distribution in the latent space. pub fn encode(&self, xs: &Tensor) -> Result<DiagonalGaussianDistribution> { let xs = self.encoder.forward(xs)?; let parameters = match &self.quant_conv { None => xs, Some(quant_conv) => quant_conv.forward(&xs)?, }; DiagonalGaussianDistribution::new(&parameters) } /// Takes as input some sampled values. pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { let xs = match &self.post_quant_conv { None => xs, Some(post_quant_conv) => &post_quant_conv.forward(xs)?, }; self.decoder.forward(xs) } }
candle/candle-transformers/src/models/stable_diffusion/vae.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/vae.rs", "repo_id": "candle", "token_count": 6467 }
use super::common::{AttnBlock, GlobalResponseNorm, LayerNormNoWeights, TimestepBlock, WLayerNorm}; use candle::{DType, Module, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug)] pub struct ResBlockStageB { depthwise: candle_nn::Conv2d, norm: WLayerNorm, channelwise_lin1: candle_nn::Linear, channelwise_grn: GlobalResponseNorm, channelwise_lin2: candle_nn::Linear, } impl ResBlockStageB { pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { groups: c, padding: ksize / 2, ..Default::default() }; let depthwise = candle_nn::conv2d(c, c, ksize, cfg, vb.pp("depthwise"))?; let norm = WLayerNorm::new(c)?; let channelwise_lin1 = candle_nn::linear(c + c_skip, c * 4, vb.pp("channelwise.0"))?; let channelwise_grn = GlobalResponseNorm::new(4 * c, vb.pp("channelwise.2"))?; let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?; Ok(Self { depthwise, norm, channelwise_lin1, channelwise_grn, channelwise_lin2, }) } pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> { let x_res = xs; let xs = xs.apply(&self.depthwise)?.apply(&self.norm)?; let xs = match x_skip { None => xs.clone(), Some(x_skip) => Tensor::cat(&[&xs, x_skip], 1)?, }; let xs = xs .permute((0, 2, 3, 1))? .contiguous()? .apply(&self.channelwise_lin1)? .gelu()? .apply(&self.channelwise_grn)? .apply(&self.channelwise_lin2)? .permute((0, 3, 1, 2))?; xs + x_res } } #[derive(Debug)] struct SubBlock { res_block: ResBlockStageB, ts_block: TimestepBlock, attn_block: Option<AttnBlock>, } #[derive(Debug)] struct DownBlock { layer_norm: Option<WLayerNorm>, conv: Option<candle_nn::Conv2d>, sub_blocks: Vec<SubBlock>, } #[derive(Debug)] struct UpBlock { sub_blocks: Vec<SubBlock>, layer_norm: Option<WLayerNorm>, conv: Option<candle_nn::ConvTranspose2d>, } #[derive(Debug)] pub struct WDiffNeXt { clip_mapper: candle_nn::Linear, effnet_mappers: Vec<Option<candle_nn::Conv2d>>, seq_norm: LayerNormNoWeights, embedding_conv: candle_nn::Conv2d, embedding_ln: WLayerNorm, down_blocks: Vec<DownBlock>, up_blocks: Vec<UpBlock>, clf_ln: WLayerNorm, clf_conv: candle_nn::Conv2d, c_r: usize, patch_size: usize, } impl WDiffNeXt { #[allow(clippy::too_many_arguments)] pub fn new( c_in: usize, c_out: usize, c_r: usize, c_cond: usize, clip_embd: usize, patch_size: usize, use_flash_attn: bool, vb: VarBuilder, ) -> Result<Self> { const C_HIDDEN: [usize; 4] = [320, 640, 1280, 1280]; const BLOCKS: [usize; 4] = [4, 4, 14, 4]; const NHEAD: [usize; 4] = [1, 10, 20, 20]; const INJECT_EFFNET: [bool; 4] = [false, true, true, true]; const EFFNET_EMBD: usize = 16; let clip_mapper = candle_nn::linear(clip_embd, c_cond, vb.pp("clip_mapper"))?; let mut effnet_mappers = Vec::with_capacity(2 * INJECT_EFFNET.len()); let vb_e = vb.pp("effnet_mappers"); for (i, &inject) in INJECT_EFFNET.iter().enumerate() { let c = if inject { Some(candle_nn::conv2d( EFFNET_EMBD, c_cond, 1, Default::default(), vb_e.pp(i), )?) } else { None }; effnet_mappers.push(c) } for (i, &inject) in INJECT_EFFNET.iter().rev().enumerate() { let c = if inject { Some(candle_nn::conv2d( EFFNET_EMBD, c_cond, 1, Default::default(), vb_e.pp(i + INJECT_EFFNET.len()), )?) 
} else { None }; effnet_mappers.push(c) } let seq_norm = LayerNormNoWeights::new(c_cond)?; let embedding_ln = WLayerNorm::new(C_HIDDEN[0])?; let embedding_conv = candle_nn::conv2d( c_in * patch_size * patch_size, C_HIDDEN[0], 1, Default::default(), vb.pp("embedding.1"), )?; let mut down_blocks = Vec::with_capacity(C_HIDDEN.len()); for (i, &c_hidden) in C_HIDDEN.iter().enumerate() { let vb = vb.pp("down_blocks").pp(i); let (layer_norm, conv, start_layer_i) = if i > 0 { let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?; let cfg = candle_nn::Conv2dConfig { stride: 2, ..Default::default() }; let conv = candle_nn::conv2d(C_HIDDEN[i - 1], c_hidden, 2, cfg, vb.pp("0.1"))?; (Some(layer_norm), Some(conv), 1) } else { (None, None, 0) }; let mut sub_blocks = Vec::with_capacity(BLOCKS[i]); let mut layer_i = start_layer_i; for _j in 0..BLOCKS[i] { let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 }; let res_block = ResBlockStageB::new(c_hidden, c_skip, 3, vb.pp(layer_i))?; layer_i += 1; let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?; layer_i += 1; let attn_block = if i == 0 { None } else { let attn_block = AttnBlock::new( c_hidden, c_cond, NHEAD[i], true, use_flash_attn, vb.pp(layer_i), )?; layer_i += 1; Some(attn_block) }; let sub_block = SubBlock { res_block, ts_block, attn_block, }; sub_blocks.push(sub_block) } let down_block = DownBlock { layer_norm, conv, sub_blocks, }; down_blocks.push(down_block) } let mut up_blocks = Vec::with_capacity(C_HIDDEN.len()); for (i, &c_hidden) in C_HIDDEN.iter().enumerate().rev() { let vb = vb.pp("up_blocks").pp(C_HIDDEN.len() - 1 - i); let mut sub_blocks = Vec::with_capacity(BLOCKS[i]); let mut layer_i = 0; for j in 0..BLOCKS[i] { let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 }; let c_skip_res = if i < BLOCKS.len() - 1 && j == 0 { c_hidden + c_skip } else { c_skip }; let res_block = ResBlockStageB::new(c_hidden, c_skip_res, 3, vb.pp(layer_i))?; layer_i += 1; let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?; layer_i += 1; let attn_block = if i == 0 { None } else { let attn_block = AttnBlock::new( c_hidden, c_cond, NHEAD[i], true, use_flash_attn, vb.pp(layer_i), )?; layer_i += 1; Some(attn_block) }; let sub_block = SubBlock { res_block, ts_block, attn_block, }; sub_blocks.push(sub_block) } let (layer_norm, conv) = if i > 0 { let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?; let cfg = candle_nn::ConvTranspose2dConfig { stride: 2, ..Default::default() }; let conv = candle_nn::conv_transpose2d( c_hidden, C_HIDDEN[i - 1], 2, cfg, vb.pp(layer_i).pp(1), )?; (Some(layer_norm), Some(conv)) } else { (None, None) }; let up_block = UpBlock { layer_norm, conv, sub_blocks, }; up_blocks.push(up_block) } let clf_ln = WLayerNorm::new(C_HIDDEN[0])?; let clf_conv = candle_nn::conv2d( C_HIDDEN[0], 2 * c_out * patch_size * patch_size, 1, Default::default(), vb.pp("clf.1"), )?; Ok(Self { clip_mapper, effnet_mappers, seq_norm, embedding_conv, embedding_ln, down_blocks, up_blocks, clf_ln, clf_conv, c_r, patch_size, }) } fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> { const MAX_POSITIONS: usize = 10000; let r = (r * MAX_POSITIONS as f64)?; let half_dim = self.c_r / 2; let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64; let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)? * -emb)? .exp()?; let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?; let emb = if self.c_r % 2 == 1 { emb.pad_with_zeros(D::Minus1, 0, 1)? 
} else { emb }; emb.to_dtype(r.dtype()) } fn gen_c_embeddings(&self, clip: &Tensor) -> Result<Tensor> { clip.apply(&self.clip_mapper)?.apply(&self.seq_norm) } pub fn forward( &self, xs: &Tensor, r: &Tensor, effnet: &Tensor, clip: Option<&Tensor>, ) -> Result<Tensor> { const EPS: f64 = 1e-3; let r_embed = self.gen_r_embedding(r)?; let clip = match clip { None => None, Some(clip) => Some(self.gen_c_embeddings(clip)?), }; let x_in = xs; let mut xs = xs .apply(&|xs: &_| candle_nn::ops::pixel_unshuffle(xs, self.patch_size))? .apply(&self.embedding_conv)? .apply(&self.embedding_ln)?; let mut level_outputs = Vec::new(); for (i, down_block) in self.down_blocks.iter().enumerate() { if let Some(ln) = &down_block.layer_norm { xs = xs.apply(ln)? } if let Some(conv) = &down_block.conv { xs = xs.apply(conv)? } let skip = match &self.effnet_mappers[i] { None => None, Some(m) => { let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?; Some(m.forward(&effnet)?) } }; for block in down_block.sub_blocks.iter() { xs = block.res_block.forward(&xs, skip.as_ref())?; xs = block.ts_block.forward(&xs, &r_embed)?; if let Some(attn_block) = &block.attn_block { xs = attn_block.forward(&xs, clip.as_ref().unwrap())?; } } level_outputs.push(xs.clone()) } level_outputs.reverse(); let mut xs = level_outputs[0].clone(); for (i, up_block) in self.up_blocks.iter().enumerate() { let effnet_c = match &self.effnet_mappers[self.down_blocks.len() + i] { None => None, Some(m) => { let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?; Some(m.forward(&effnet)?) } }; for (j, block) in up_block.sub_blocks.iter().enumerate() { let skip = if j == 0 && i > 0 { Some(&level_outputs[i]) } else { None }; let skip = match (skip, effnet_c.as_ref()) { (Some(skip), Some(effnet_c)) => Some(Tensor::cat(&[skip, effnet_c], 1)?), (None, Some(skip)) | (Some(skip), None) => Some(skip.clone()), (None, None) => None, }; xs = block.res_block.forward(&xs, skip.as_ref())?; xs = block.ts_block.forward(&xs, &r_embed)?; if let Some(attn_block) = &block.attn_block { xs = attn_block.forward(&xs, clip.as_ref().unwrap())?; } } if let Some(ln) = &up_block.layer_norm { xs = xs.apply(ln)? } if let Some(conv) = &up_block.conv { xs = xs.apply(conv)? } } let ab = xs .apply(&self.clf_ln)? .apply(&self.clf_conv)? .apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, self.patch_size))? .chunk(2, 1)?; let b = ((candle_nn::ops::sigmoid(&ab[1])? * (1. - EPS * 2.))? + EPS)?; (x_in - &ab[0])? / b } }
candle/candle-transformers/src/models/wuerstchen/diffnext.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/diffnext.rs", "repo_id": "candle", "token_count": 8148 }
// Load the Candle BERT wasm module
import init, { Model } from "./build/m.js";

async function fetchArrayBuffer(url) {
  const cacheName = "bert-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Bert {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (!this.instance[modelID]) {
      await init();

      self.postMessage({ status: "loading", message: "Loading Model" });

      // The three buffers are the model weights, the tokenizer and the model config.
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([
        fetchArrayBuffer(weightsURL),
        fetchArrayBuffer(tokenizerURL),
        fetchArrayBuffer(configURL),
      ]);

      this.instance[modelID] = new Model(weightsArrayU8, tokenizerArrayU8, configArrayU8);
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const { weightsURL, tokenizerURL, configURL, modelID, sentences, normalize = true } = event.data;
  try {
    self.postMessage({ status: "ready", message: "Starting Bert Model" });
    const model = await Bert.getInstance(weightsURL, tokenizerURL, configURL, modelID);
    self.postMessage({ status: "embedding", message: "Calculating Embeddings" });
    const output = model.get_embeddings({
      sentences: sentences,
      normalize_embeddings: normalize,
    });

    self.postMessage({
      status: "complete",
      message: "complete",
      output: output.data,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
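For context, a minimal sketch of how a page might drive this worker from the main thread. The asset URLs and the `modelID` value below are placeholders, not real endpoints; only the message shape mirrors the handler above.

```ts
// Spawn the worker as an ES module (path relative to the calling module).
const worker = new Worker(new URL("./bertWorker.js", import.meta.url), { type: "module" });

worker.addEventListener("message", (event: MessageEvent) => {
  const { status, message, output, error } = event.data;
  if (error) {
    console.error(error);
  } else if (status === "complete") {
    console.log("embeddings:", output); // embedding vectors for the submitted sentences
  } else {
    console.log(status, message); // progress updates: "loading", "embedding", ...
  }
});

worker.postMessage({
  weightsURL: "https://example.com/model.safetensors", // hypothetical asset URLs
  tokenizerURL: "https://example.com/tokenizer.json",
  configURL: "https://example.com/config.json",
  modelID: "my-bert-model", // any stable id used to cache the wasm instance
  sentences: ["Hello world", "How are you?"],
  normalize: true,
});
```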
candle/candle-wasm-examples/bert/bertWorker.js/0
{ "file_path": "candle/candle-wasm-examples/bert/bertWorker.js", "repo_id": "candle", "token_count": 779 }
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url, cacheModel = true) { if (!cacheModel) return new Uint8Array(await (await fetch(url)).arrayBuffer()); const cacheName = "moondream-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } async function concatenateArrayBuffers(urls) { const arrayBuffers = await Promise.all( urls.map((url) => fetchArrayBuffer(url)) ); let totalLength = arrayBuffers.reduce( (acc, arrayBuffer) => acc + arrayBuffer.byteLength, 0 ); let concatenatedBuffer = new Uint8Array(totalLength); let offset = 0; arrayBuffers.forEach((buffer) => { concatenatedBuffer.set(new Uint8Array(buffer), offset); offset += buffer.byteLength; }); return concatenatedBuffer; } class Moondream { static imageArrayHash = {}; static instance = {}; static currentModelID = null; static async getInstance(weightsURL, modelID, tokenizerURL, quantized) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8] = await Promise.all([ weightsURL instanceof Array ? concatenateArrayBuffers(weightsURL) : fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), ]); this.instance[modelID] = new Model( weightsArrayU8, tokenizerArrayU8, quantized ); } this.currentModelID = modelID; return this.instance[modelID]; } // Remove the modelID parameter from setImageEmbeddings static setImageEmbeddings(imageArrayU8) { // check if image embeddings are already set for this image and model const imageArrayHash = this.getSimpleHash(imageArrayU8); if ( this.imageArrayHash[this.currentModelID] === imageArrayHash && this.instance[this.currentModelID] ) { self.postMessage({ status: "embedding", message: "Embeddings Already Set", }); return; } this.imageArrayHash[this.currentModelID] = imageArrayHash; this.instance[this.currentModelID].set_image_embeddings(imageArrayU8); self.postMessage({ status: "embedding", message: "Embeddings Set" }); } static getSimpleHash(imageArrayU8) { // get simple hash of imageArrayU8 let imageArrayHash = 0; for (let i = 0; i < imageArrayU8.length; i += 100) { imageArrayHash ^= imageArrayU8[i]; } return imageArrayHash.toString(16); } } let controller = null; self.addEventListener("message", (event) => { if (event.data.command === "start") { controller = new AbortController(); generate(event.data); } else if (event.data.command === "abort") { controller.abort(); } }); async function generate(data) { const { weightsURL, modelID, tokenizerURL, quantized, imageURL, prompt, seed, temp, top_p, repeatPenalty, maxSeqLen, verbose_prompt, } = data; try { self.postMessage({ status: "loading", message: "Starting Moondream" }); const model = await Moondream.getInstance( weightsURL, modelID, tokenizerURL, quantized ); self.postMessage({ status: "loading", message: "Initializing model" }); self.postMessage({ status: "loading", message: "Loading Image" }); const imageArrayU8 = await fetchArrayBuffer(imageURL, false); self.postMessage({ status: "embedding", message: "Creating Embeddings" }); Moondream.setImageEmbeddings(imageArrayU8); self.postMessage({ status: "complete-embedding", message: "Embeddings Complete", }); const { token, token_id } = 
model.init_with_image_prompt({ prompt, seed: BigInt(seed), temp: parseFloat(temp), top_p: parseFloat(top_p), repeat_penalty: parseFloat(repeatPenalty), repeat_last_n: 64, verbose_prompt, }); const seq_len = 2048; let sentence = token; let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1; let startTime = performance.now(); let tokensCount = 0; while (tokensCount < maxTokens) { await new Promise(async (resolve) => { if (controller && controller.signal.aborted) { console.log("Aborted"); self.postMessage({ status: "aborted", message: "Aborted", output: prompt + sentence, }); return; } const { token, token_id } = await model.next_token(); if (token_id === 50256) { // <|endoftext|> self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); return; } const tokensSec = ((tokensCount + 1) / (performance.now() - startTime)) * 1000; sentence += token; self.postMessage({ status: "generating", message: "Generating token", token: token, sentence: sentence, totalTime: performance.now() - startTime, tokensSec, prompt: prompt, }); setTimeout(resolve, 0); }); tokensCount++; } self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); } catch (e) { self.postMessage({ error: e }); } }
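As above, a hedged sketch of the main-thread side of this worker. All URLs and the model identifier are hypothetical; the field names follow the `generate` handler and the `start`/`abort` commands defined in the file.

```ts
const worker = new Worker(new URL("./moondreamWorker.js", import.meta.url), { type: "module" });

worker.addEventListener("message", (event: MessageEvent) => {
  const data = event.data;
  if (data.error) console.error(data.error);
  else if (data.status === "generating") console.log(data.sentence); // partial output so far
  else if (data.status === "complete" || data.status === "aborted") console.log(data.output);
});

worker.postMessage({
  command: "start",
  weightsURL: "https://example.com/moondream-q4.gguf", // hypothetical asset URLs
  tokenizerURL: "https://example.com/tokenizer.json",
  modelID: "moondream2-q4",
  quantized: true,
  imageURL: "https://example.com/photo.jpg",
  prompt: "Describe this image.",
  seed: 42,
  temp: 0.2,
  top_p: 1.0,
  repeatPenalty: 1.1,
  maxSeqLen: 256,
  verbose_prompt: false,
});

// Later, to cancel an in-flight generation:
// worker.postMessage({ command: "abort" });
```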
candle/candle-wasm-examples/moondream/moondreamWorker.js/0
{ "file_path": "candle/candle-wasm-examples/moondream/moondreamWorker.js", "repo_id": "candle", "token_count": 2273 }
Dockerfile .vscode/ .idea .gitignore LICENSE README.md node_modules/ .svelte-kit/ .env* !.env .env.local
chat-ui/.dockerignore/0
{ "file_path": "chat-ui/.dockerignore", "repo_id": "chat-ui", "token_count": 51 }
.DS_Store node_modules /build /.svelte-kit /package /chart .env .env.* !.env.example # Ignore files for PNPM, NPM and YARN pnpm-lock.yaml package-lock.json yarn.lock
chat-ui/.prettierignore/0
{ "file_path": "chat-ui/.prettierignore", "repo_id": "chat-ui", "token_count": 72 }
{{- if $.Values.ingress.enabled }} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: {{ toYaml .Values.ingress.annotations | nindent 4 }} labels: {{ include "labels.standard" . | nindent 4 }} name: {{ include "name" . }} namespace: {{ .Release.Namespace }} spec: {{ if $.Values.ingress.className }} ingressClassName: {{ .Values.ingress.className }} {{ end }} {{- with .Values.ingress.tls }} tls: - hosts: - {{ $.Values.domain | quote }} {{- with .secretName }} secretName: {{ . }} {{- end }} {{- end }} rules: - host: {{ .Values.domain }} http: paths: - backend: service: name: {{ include "name" . }} port: name: http path: {{ $.Values.ingress.path | default "/" }} pathType: Prefix {{- end }}
chat-ui/chart/templates/ingress.yaml/0
{ "file_path": "chat-ui/chart/templates/ingress.yaml", "repo_id": "chat-ui", "token_count": 400 }
# Google

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | No        |

Chat UI can connect to the Google Vertex API endpoints ([List of supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models)).

To enable:

1. [Select](https://console.cloud.google.com/project) or [create](https://cloud.google.com/resource-manager/docs/creating-managing-projects#creating_a_project) a Google Cloud project.
1. [Enable billing for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
1. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).
1. [Set up authentication with a service account](https://cloud.google.com/docs/authentication/getting-started) so you can access the API from your local workstation.

The service account credentials file can be provided through an environment variable:

```ini
GOOGLE_APPLICATION_CREDENTIALS = clientid.json
```

Make sure your Docker container has access to the file and that the variable is correctly set.

Afterwards, Google Vertex endpoints can be configured as follows:

```ini
MODELS=`[
  {
    "name": "gemini-1.5-pro",
    "displayName": "Vertex Gemini Pro 1.5",
    "endpoints" : [{
      "type": "vertex",
      "project": "abc-xyz",
      "location": "europe-west3",
      "extraBody": {
        "model_version": "gemini-1.5-pro-002"
      },
      // Optional
      "safetyThreshold": "BLOCK_MEDIUM_AND_ABOVE",
      "apiEndpoint": "", // alternative API endpoint URL
      "tools": [{
        "googleSearchRetrieval": {
          "disableAttribution": true
        }
      }]
    }]
  }
]`
```

## GenAI

Alternatively, you can use the Gemini API provider via the [generative-ai-js SDK](https://github.com/google-gemini/generative-ai-js#readme).

Make sure that you have an API key from Google Cloud Platform. To get an API key, follow the instructions [here](https://ai.google.dev/gemini-api/docs/api-key).

You can either specify the key in your `.env.local` using the `GOOGLE_GENAI_API_KEY` variable, or set it directly in the endpoint config.

You can find the list of models available [here](https://ai.google.dev/gemini-api/docs/models/gemini), and experimental models available [here](https://ai.google.dev/gemini-api/docs/models/experimental-models).

```ini
MODELS=`[
  {
    "name": "gemini-1.5-flash",
    "displayName": "Gemini Flash 1.5",
    "multimodal": true,
    "endpoints": [
      {
        "type": "genai",
        // Optional
        "apiKey": "abc...xyz",
        "safetyThreshold": "BLOCK_MEDIUM_AND_ABOVE"
      }
    ]
  },
  {
    "name": "gemini-1.5-pro",
    "displayName": "Gemini Pro 1.5",
    "multimodal": false,
    "endpoints": [
      {
        "type": "genai",
        // Optional
        "apiKey": "abc...xyz"
      }
    ]
  }
]`
```
chat-ui/docs/source/configuration/models/providers/google.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/google.md", "repo_id": "chat-ui", "token_count": 1138 }
# Running Locally

You may start an instance locally for non-production use cases. For production use cases, please see the other installation options.

## Configuration

The default config for Chat UI is stored in the `.env` file. You will need to override some values to get Chat UI to run locally. Start by creating a `.env.local` file in the root of the repository as per the [configuration section](../configuration/overview). The bare minimum config you need to get Chat UI to run locally is the following:

```ini
MONGODB_URL=<the URL to your MongoDB instance>
HF_TOKEN=<your access token> # find your token at hf.co/settings/token
```

## Database

The chat history is stored in a MongoDB instance, so a DB instance needs to be available for Chat UI to work.

You can use a local MongoDB instance. The easiest way is to spin one up using Docker with persistence:

```bash
docker run -d -p 27017:27017 -v mongo-chat-ui:/data --name mongo-chat-ui mongo:latest
```

In that case, the URL of your DB will be `MONGODB_URL=mongodb://localhost:27017`.

Alternatively, you can use a [free MongoDB Atlas](https://www.mongodb.com/pricing) instance; Chat UI should fit comfortably within its free tier. You can then set the `MONGODB_URL` variable in `.env.local` to match your instance.

## Starting the server

```bash
npm ci # install dependencies
npm run build # build the project
npm run preview -- --open # start the server & open your instance at http://localhost:4173
```
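If you want to sanity-check your `MONGODB_URL` value outside of Chat UI, a minimal Node.js sketch using the official `mongodb` driver (which Chat UI already depends on) can ping the server. This script is optional and not part of the Chat UI codebase:

```ts
// check-db.ts — run with a TypeScript runner such as `npx tsx check-db.ts`
import { MongoClient } from "mongodb";

const client = new MongoClient(process.env.MONGODB_URL ?? "mongodb://localhost:27017");

try {
  await client.connect();
  await client.db("admin").command({ ping: 1 }); // throws if the server is unreachable
  console.log("MongoDB is reachable");
} finally {
  await client.close();
}
```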
chat-ui/docs/source/installation/local.md/0
{ "file_path": "chat-ui/docs/source/installation/local.md", "repo_id": "chat-ui", "token_count": 416 }
<script lang="ts"> interface Props { title?: string; classNames?: string; children?: import("svelte").Snippet; } let { title = "", classNames = "", children }: Props = $props(); </script> <div class="flex items-center rounded-xl bg-gray-100 p-1 text-sm dark:bg-gray-800 {classNames}"> <span class="from-primary-300 text-primary-700 dark:from-primary-900 dark:text-primary-400 mr-2 inline-flex items-center rounded-lg bg-gradient-to-br px-2 py-1 text-xxs font-medium uppercase leading-3" >New</span > {title} <div class="ml-auto shrink-0"> {@render children?.()} </div> </div>
chat-ui/src/lib/components/AnnouncementBanner.svelte/0
{ "file_path": "chat-ui/src/lib/components/AnnouncementBanner.svelte", "repo_id": "chat-ui", "token_count": 235 }
<script lang="ts"> import { MessageWebSearchUpdateType, type MessageWebSearchUpdate, } from "$lib/types/MessageUpdate"; import { isMessageWebSearchSourcesUpdate } from "$lib/utils/messageUpdates"; import CarbonError from "~icons/carbon/error-filled"; import EosIconsLoading from "~icons/eos-icons/loading"; import IconInternet from "./icons/IconInternet.svelte"; import CarbonCaretDown from "~icons/carbon/caret-down"; interface Props { webSearchMessages?: MessageWebSearchUpdate[]; } let { webSearchMessages = [] }: Props = $props(); let sources = $derived(webSearchMessages.find(isMessageWebSearchSourcesUpdate)?.sources); let lastMessage = $derived( webSearchMessages .filter((update) => update.subtype !== MessageWebSearchUpdateType.Sources) .at(-1) as MessageWebSearchUpdate ); let errored = $derived( webSearchMessages.some((update) => update.subtype === MessageWebSearchUpdateType.Error) ); let loading = $derived(!sources && !errored); </script> <details class="group flex w-fit max-w-full flex-col rounded-xl border border-gray-200 bg-white shadow-sm dark:border-gray-800 dark:bg-gray-900" > <summary class="grid min-w-72 cursor-pointer select-none grid-cols-[40px,1fr,24px] items-center gap-2.5 rounded-xl p-2 group-open:rounded-b-none hover:bg-gray-500/10" > <div class="relative grid aspect-square place-content-center overflow-hidden rounded-lg bg-gray-100 dark:bg-gray-800" > <svg class="absolute inset-0 text-gray-300 transition-opacity dark:text-gray-700 {loading ? 'opacity-100' : 'opacity-0'}" width="40" height="40" viewBox="0 0 38 38" fill="none" xmlns="http://www.w3.org/2000/svg" > <path class="loading-path" d="M8 2.5H30C30 2.5 35.5 2.5 35.5 8V30C35.5 30 35.5 35.5 30 35.5H8C8 35.5 2.5 35.5 2.5 30V8C2.5 8 2.5 2.5 8 2.5Z" stroke="currentColor" stroke-width="1" stroke-linecap="round" id="shape" /> </svg> <IconInternet classNames="relative fill-current text-xl" /> </div> <dl class="leading-4"> <dd class="text-sm">Web Search</dd> <dt class="flex items-center gap-1 truncate whitespace-nowrap text-[.82rem] text-gray-400"> {#if sources} Completed {:else} {"message" in lastMessage ? lastMessage.message : "An error occurred"} {/if} </dt> </dl> <CarbonCaretDown class="size-6 text-gray-400 transition-transform group-open:rotate-180" /> </summary> <div class="content px-5 pb-5 pt-4"> {#if webSearchMessages.length === 0} <div class="mx-auto w-fit"> <EosIconsLoading class="mb-3 h-4 w-4" /> </div> {:else} <ol> {#each webSearchMessages as message} {#if message.subtype === MessageWebSearchUpdateType.Update} <li class="group border-l pb-6 last:!border-transparent last:pb-0 dark:border-gray-800"> <div class="flex items-start"> <div class="-ml-1.5 h-3 w-3 flex-none rounded-full bg-gray-200 dark:bg-gray-600 {loading ? 
'group-last:animate-pulse group-last:bg-gray-300 group-last:dark:bg-gray-500' : ''}" ></div> <h3 class="text-md -mt-1.5 pl-2.5 text-gray-800 dark:text-gray-100"> {message.message} </h3> </div> {#if message.args} <p class="mt-0.5 pl-4 text-gray-500 dark:text-gray-400"> {message.args} </p> {/if} </li> {:else if message.subtype === MessageWebSearchUpdateType.Error} <li class="group border-l pb-6 last:!border-transparent last:pb-0 dark:border-gray-800"> <div class="flex items-start"> <CarbonError class="-ml-1.5 h-3 w-3 flex-none scale-110 text-red-700 dark:text-red-500" /> <h3 class="text-md -mt-1.5 pl-2.5 text-red-700 dark:text-red-500"> {message.message} </h3> </div> {#if message.args} <p class="mt-0.5 pl-4 text-gray-500 dark:text-gray-400"> {message.args} </p> {/if} </li> {/if} {/each} </ol> {/if} </div> </details> <style> details summary::-webkit-details-marker { display: none; } .loading-path { stroke-dasharray: 61.45; animation: loading 2s linear infinite; } @keyframes loading { to { stroke-dashoffset: 122.9; } } </style>
chat-ui/src/lib/components/OpenWebSearchResults.svelte/0
{ "file_path": "chat-ui/src/lib/components/OpenWebSearchResults.svelte", "repo_id": "chat-ui", "token_count": 1931 }
<script lang="ts"> import CarbonUpload from "~icons/carbon/upload"; interface Props { classNames?: string; files: File[]; mimeTypes: string[]; } let { classNames = "", files = $bindable(), mimeTypes }: Props = $props(); /** * Due to a bug with Svelte, we cannot use bind:files with multiple * So we use this workaround **/ const onFileChange = (e: Event) => { if (!e.target) return; const target = e.target as HTMLInputElement; files = [...files, ...(target.files ?? [])]; }; </script> <button class="btn relative h-8 rounded-lg border bg-white px-3 py-1 text-sm text-gray-500 shadow-sm hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}" > <input class="absolute w-full cursor-pointer opacity-0" aria-label="Upload file" type="file" onchange={onFileChange} accept={mimeTypes.join(",")} /> <CarbonUpload class="mr-2 text-xxs" /> Upload file </button>
chat-ui/src/lib/components/UploadBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/UploadBtn.svelte", "repo_id": "chat-ui", "token_count": 351 }
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg class={classNames} xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" > <path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)" /> <path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)" /><rect fill="none" width="32" height="32" /> </svg>
chat-ui/src/lib/components/icons/IconCopy.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconCopy.svelte", "repo_id": "chat-ui", "token_count": 324 }
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; /** * Returns the lock id if the lock was acquired, false otherwise */ export async function acquireLock(key: string): Promise<ObjectId | false> { try { const id = new ObjectId(); const insert = await collections.semaphores.insertOne({ _id: id, key, createdAt: new Date(), updatedAt: new Date(), }); return insert.acknowledged ? id : false; // true if the document was inserted } catch (e) { // unique index violation, so there must already be a lock return false; } } export async function releaseLock(key: string, lockId: ObjectId) { await collections.semaphores.deleteOne({ _id: lockId, key, }); } export async function isDBLocked(key: string): Promise<boolean> { const res = await collections.semaphores.countDocuments({ key, }); return res > 0; } export async function refreshLock(key: string, lockId: ObjectId): Promise<boolean> { const result = await collections.semaphores.updateOne( { _id: lockId, key, }, { $set: { updatedAt: new Date(), }, } ); return result.matchedCount > 0; }
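A hedged usage sketch for these helpers — the lock key and the work inside the `try` block are hypothetical; only the acquire/refresh/release calls come from the module above.

```ts
import { acquireLock, refreshLock, releaseLock } from "$lib/migrations/lock";

const LOCK_KEY = "nightly-cleanup"; // hypothetical key identifying the critical section

const lockId = await acquireLock(LOCK_KEY);
if (lockId === false) {
  console.log("Another instance already holds the lock, skipping this run");
} else {
  // Periodically bump `updatedAt` so other processes can see the lock is still active.
  const keepAlive = setInterval(() => refreshLock(LOCK_KEY, lockId), 30_000);
  try {
    // ... exclusive work goes here ...
  } finally {
    clearInterval(keepAlive);
    await releaseLock(LOCK_KEY, lockId);
  }
}
```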
chat-ui/src/lib/migrations/lock.ts/0
{ "file_path": "chat-ui/src/lib/migrations/lock.ts", "repo_id": "chat-ui", "token_count": 400 }
import { env } from "$env/dynamic/private"; import { GridFSBucket, MongoClient } from "mongodb"; import type { Conversation } from "$lib/types/Conversation"; import type { SharedConversation } from "$lib/types/SharedConversation"; import type { AbortedGeneration } from "$lib/types/AbortedGeneration"; import type { Settings } from "$lib/types/Settings"; import type { User } from "$lib/types/User"; import type { MessageEvent } from "$lib/types/MessageEvent"; import type { Session } from "$lib/types/Session"; import type { Assistant } from "$lib/types/Assistant"; import type { Report } from "$lib/types/Report"; import type { ConversationStats } from "$lib/types/ConversationStats"; import type { MigrationResult } from "$lib/types/MigrationResult"; import type { Semaphore } from "$lib/types/Semaphore"; import type { AssistantStats } from "$lib/types/AssistantStats"; import type { CommunityToolDB } from "$lib/types/Tool"; import { logger } from "$lib/server/logger"; import { building } from "$app/environment"; import type { TokenCache } from "$lib/types/TokenCache"; import { onExit } from "./exitHandler"; export const CONVERSATION_STATS_COLLECTION = "conversations.stats"; export class Database { private client: MongoClient; private static instance: Database; private constructor() { if (!env.MONGODB_URL) { throw new Error( "Please specify the MONGODB_URL environment variable inside .env.local. Set it to mongodb://localhost:27017 if you are running MongoDB locally, or to a MongoDB Atlas free instance for example." ); } this.client = new MongoClient(env.MONGODB_URL, { directConnection: env.MONGODB_DIRECT_CONNECTION === "true", }); this.client.connect().catch((err) => { logger.error(err, "Connection error"); process.exit(1); }); this.client.db(env.MONGODB_DB_NAME + (import.meta.env.MODE === "test" ? "-test" : "")); this.client.on("open", () => this.initDatabase()); // Disconnect DB on exit onExit(() => this.client.close(true)); } public static getInstance(): Database { if (!Database.instance) { Database.instance = new Database(); } return Database.instance; } /** * Return mongoClient */ public getClient(): MongoClient { return this.client; } /** * Return map of database's collections */ public getCollections() { const db = this.client.db( env.MONGODB_DB_NAME + (import.meta.env.MODE === "test" ? 
"-test" : "") ); const conversations = db.collection<Conversation>("conversations"); const conversationStats = db.collection<ConversationStats>(CONVERSATION_STATS_COLLECTION); const assistants = db.collection<Assistant>("assistants"); const assistantStats = db.collection<AssistantStats>("assistants.stats"); const reports = db.collection<Report>("reports"); const sharedConversations = db.collection<SharedConversation>("sharedConversations"); const abortedGenerations = db.collection<AbortedGeneration>("abortedGenerations"); const settings = db.collection<Settings>("settings"); const users = db.collection<User>("users"); const sessions = db.collection<Session>("sessions"); const messageEvents = db.collection<MessageEvent>("messageEvents"); const bucket = new GridFSBucket(db, { bucketName: "files" }); const migrationResults = db.collection<MigrationResult>("migrationResults"); const semaphores = db.collection<Semaphore>("semaphores"); const tokenCaches = db.collection<TokenCache>("tokens"); const tools = db.collection<CommunityToolDB>("tools"); return { conversations, conversationStats, assistants, assistantStats, reports, sharedConversations, abortedGenerations, settings, users, sessions, messageEvents, bucket, migrationResults, semaphores, tokenCaches, tools, }; } /** * Init database once connected: Index creation * @private */ private initDatabase() { const { conversations, conversationStats, assistants, assistantStats, reports, sharedConversations, abortedGenerations, settings, users, sessions, messageEvents, semaphores, tokenCaches, tools, } = this.getCollections(); conversations .createIndex( { sessionId: 1, updatedAt: -1 }, { partialFilterExpression: { sessionId: { $exists: true } } } ) .catch((e) => logger.error(e)); conversations .createIndex( { userId: 1, updatedAt: -1 }, { partialFilterExpression: { userId: { $exists: true } } } ) .catch((e) => logger.error(e)); conversations .createIndex( { "message.id": 1, "message.ancestors": 1 }, { partialFilterExpression: { userId: { $exists: true } } } ) .catch((e) => logger.error(e)); // Not strictly necessary, could use _id, but more convenient. 
Also for stats // To do stats on conversation messages conversations .createIndex({ "messages.createdAt": 1 }, { sparse: true }) .catch((e) => logger.error(e)); // Unique index for stats conversationStats .createIndex( { type: 1, "date.field": 1, "date.span": 1, "date.at": 1, distinct: 1, }, { unique: true } ) .catch((e) => logger.error(e)); // Allow easy check of last computed stat for given type/dateField conversationStats .createIndex({ type: 1, "date.field": 1, "date.at": 1, }) .catch((e) => logger.error(e)); abortedGenerations .createIndex({ updatedAt: 1 }, { expireAfterSeconds: 30 }) .catch((e) => logger.error(e)); abortedGenerations .createIndex({ conversationId: 1 }, { unique: true }) .catch((e) => logger.error(e)); sharedConversations.createIndex({ hash: 1 }, { unique: true }).catch((e) => logger.error(e)); settings .createIndex({ sessionId: 1 }, { unique: true, sparse: true }) .catch((e) => logger.error(e)); settings .createIndex({ userId: 1 }, { unique: true, sparse: true }) .catch((e) => logger.error(e)); settings.createIndex({ assistants: 1 }).catch((e) => logger.error(e)); users.createIndex({ hfUserId: 1 }, { unique: true }).catch((e) => logger.error(e)); users .createIndex({ sessionId: 1 }, { unique: true, sparse: true }) .catch((e) => logger.error(e)); // No unicity because due to renames & outdated info from oauth provider, there may be the same username on different users users.createIndex({ username: 1 }).catch((e) => logger.error(e)); messageEvents .createIndex({ createdAt: 1 }, { expireAfterSeconds: 60 }) .catch((e) => logger.error(e)); sessions.createIndex({ expiresAt: 1 }, { expireAfterSeconds: 0 }).catch((e) => logger.error(e)); sessions.createIndex({ sessionId: 1 }, { unique: true }).catch((e) => logger.error(e)); assistants.createIndex({ createdById: 1, userCount: -1 }).catch((e) => logger.error(e)); assistants.createIndex({ userCount: 1 }).catch((e) => logger.error(e)); assistants.createIndex({ review: 1, userCount: -1 }).catch((e) => logger.error(e)); assistants.createIndex({ modelId: 1, userCount: -1 }).catch((e) => logger.error(e)); assistants.createIndex({ searchTokens: 1 }).catch((e) => logger.error(e)); assistants.createIndex({ last24HoursCount: 1 }).catch((e) => logger.error(e)); assistants .createIndex({ last24HoursUseCount: -1, useCount: -1, _id: 1 }) .catch((e) => logger.error(e)); assistantStats // Order of keys is important for the queries .createIndex({ "date.span": 1, "date.at": 1, assistantId: 1 }, { unique: true }) .catch((e) => logger.error(e)); reports.createIndex({ assistantId: 1 }).catch((e) => logger.error(e)); reports.createIndex({ createdBy: 1, assistantId: 1 }).catch((e) => logger.error(e)); // Unique index for semaphore and migration results semaphores.createIndex({ key: 1 }, { unique: true }).catch((e) => logger.error(e)); semaphores .createIndex({ createdAt: 1 }, { expireAfterSeconds: 60 }) .catch((e) => logger.error(e)); tokenCaches .createIndex({ createdAt: 1 }, { expireAfterSeconds: 5 * 60 }) .catch((e) => logger.error(e)); tokenCaches.createIndex({ tokenHash: 1 }).catch((e) => logger.error(e)); tools.createIndex({ createdById: 1, userCount: -1 }).catch((e) => logger.error(e)); tools.createIndex({ userCount: 1 }).catch((e) => logger.error(e)); tools.createIndex({ last24HoursCount: 1 }).catch((e) => logger.error(e)); conversations .createIndex({ "messages.from": 1, createdAt: 1, }) .catch((e) => logger.error(e)); conversations .createIndex({ userId: 1, sessionId: 1, }) .catch((e) => logger.error(e)); } } export const collections = 
building ? ({} as unknown as ReturnType<typeof Database.prototype.getCollections>) : Database.getInstance().getCollections();
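A short usage sketch: server code imports the shared `collections` handle rather than constructing `Database` itself. The helper below is illustrative only; its query happens to line up with the `{ userId: 1, updatedAt: -1 }` index created above.

```ts
import { ObjectId } from "mongodb";
import { collections } from "$lib/server/database";

// Hypothetical helper: a user's most recent conversations, newest first.
async function latestConversations(userId: ObjectId) {
  return collections.conversations
    .find({ userId })
    .sort({ updatedAt: -1 })
    .limit(10)
    .toArray();
}
```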
chat-ui/src/lib/server/database.ts/0
{ "file_path": "chat-ui/src/lib/server/database.ts", "repo_id": "chat-ui", "token_count": 3102 }
import { GoogleGenerativeAI, HarmBlockThreshold, HarmCategory } from "@google/generative-ai"; import type { Content, Part, SafetySetting, TextPart } from "@google/generative-ai"; import { z } from "zod"; import type { Message, MessageFile } from "$lib/types/Message"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import type { ImageProcessorOptions } from "../images"; import { env } from "$env/dynamic/private"; export const endpointGenAIParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("genai"), apiKey: z.string().default(env.GOOGLE_GENAI_API_KEY), safetyThreshold: z .enum([ HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED, HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, HarmBlockThreshold.BLOCK_NONE, HarmBlockThreshold.BLOCK_ONLY_HIGH, ]) .optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"], preferredMimeType: "image/webp", // The 4 / 3 compensates for the 33% increase in size when converting to base64 maxSizeInMB: (5 / 4) * 3, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), }); export function endpointGenAI(input: z.input<typeof endpointGenAIParametersSchema>): Endpoint { const { model, apiKey, safetyThreshold, multimodal } = endpointGenAIParametersSchema.parse(input); const genAI = new GoogleGenerativeAI(apiKey); const safetySettings = safetyThreshold ? Object.keys(HarmCategory) .filter((cat) => cat !== HarmCategory.HARM_CATEGORY_UNSPECIFIED) .reduce((acc, val) => { acc.push({ category: val as HarmCategory, threshold: safetyThreshold, }); return acc; }, [] as SafetySetting[]) : undefined; return async ({ messages, preprompt, generateSettings }) => { const parameters = { ...model.parameters, ...generateSettings }; const generativeModel = genAI.getGenerativeModel({ model: model.id ?? model.name, safetySettings, generationConfig: { maxOutputTokens: parameters?.max_new_tokens ?? 4096, stopSequences: parameters?.stop, temperature: parameters?.temperature ?? 1, }, }); let systemMessage = preprompt; if (messages[0].from === "system") { systemMessage = messages[0].content; messages.shift(); } const genAIMessages = await Promise.all( messages.map(async ({ from, content, files }: Omit<Message, "id">): Promise<Content> => { return { role: from === "user" ? "user" : "model", parts: [ ...(await Promise.all( (files ?? []).map((file) => fileToImageBlock(file, multimodal.image)) )), { text: content }, ], }; }) ); const result = await generativeModel.generateContentStream({ contents: genAIMessages, systemInstruction: systemMessage && systemMessage.trim() !== "" ? 
{ role: "system", parts: [{ text: systemMessage }], } : undefined, }); let tokenId = 0; return (async function* () { let generatedText = ""; for await (const data of result.stream) { if (!data?.candidates?.length) break; // Handle case where no candidates are present const candidate = data.candidates[0]; if (!candidate.content?.parts?.length) continue; // Skip if no parts are present const firstPart = candidate.content.parts.find((part) => "text" in part) as | TextPart | undefined; if (!firstPart) continue; // Skip if no text part is found const content = firstPart.text; generatedText += content; const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: content, logprob: 0, special: false, }, generated_text: null, details: null, }; yield output; } const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: generatedText, details: null, }; yield output; })(); }; } async function fileToImageBlock( file: MessageFile, opts: ImageProcessorOptions<"image/png" | "image/jpeg" | "image/webp"> ): Promise<Part> { const processor = makeImageProcessor(opts); const { image, mime } = await processor(file); return { inlineData: { mimeType: mime, data: image.toString("base64"), }, }; } export default endpointGenAI;
chat-ui/src/lib/server/endpoints/google/endpointGenAI.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/google/endpointGenAI.ts", "repo_id": "chat-ui", "token_count": 1780 }
import type { ToolResult, Tool } from "$lib/types/Tool"; import { MessageReasoningUpdateType, MessageUpdateType, type MessageUpdate, } from "$lib/types/MessageUpdate"; import { AbortedGenerations } from "../abortedGenerations"; import type { TextGenerationContext } from "./types"; import type { EndpointMessage } from "../endpoints/endpoints"; import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint"; import { generateSummaryOfReasoning } from "./reasoning"; import { logger } from "../logger"; type GenerateContext = Omit<TextGenerationContext, "messages"> & { messages: EndpointMessage[] }; export async function* generate( { model, endpoint, conv, messages, assistant, isContinue, promptedAt }: GenerateContext, toolResults: ToolResult[], preprompt?: string, tools?: Tool[] ): AsyncIterable<MessageUpdate> { // reasoning mode is false by default let reasoning = false; let reasoningBuffer = ""; let lastReasoningUpdate = new Date(); let status = ""; const startTime = new Date(); if ( model.reasoning && (model.reasoning.type === "regex" || model.reasoning.type === "summarize") ) { // if the model has reasoning in regex or summarize mode, it starts in reasoning mode // and we extract the answer from the reasoning reasoning = true; yield { type: MessageUpdateType.Reasoning, subtype: MessageReasoningUpdateType.Status, status: "Started reasoning...", }; } for await (const output of await endpoint({ messages, preprompt, continueMessage: isContinue, generateSettings: assistant?.generateSettings, tools, toolResults, isMultimodal: model.multimodal, conversationId: conv._id, })) { // text generation completed if (output.generated_text) { let interrupted = !output.token.special && !model.parameters.stop?.includes(output.token.text); let text = output.generated_text.trimEnd(); for (const stopToken of model.parameters.stop ?? []) { if (!text.endsWith(stopToken)) continue; interrupted = false; text = text.slice(0, text.length - stopToken.length); } let finalAnswer = text; if (model.reasoning && model.reasoning.type === "regex") { const regex = new RegExp(model.reasoning.regex); finalAnswer = regex.exec(reasoningBuffer)?.[1] ?? text; } else if (model.reasoning && model.reasoning.type === "summarize") { yield { type: MessageUpdateType.Reasoning, subtype: MessageReasoningUpdateType.Status, status: "Summarizing reasoning...", }; try { const summary = yield* generateFromDefaultEndpoint({ messages: [ { from: "user", content: `Question: ${ messages[messages.length - 1].content }\n\nReasoning: ${reasoningBuffer}`, }, ], preprompt: `Your task is to summarize concisely all your reasoning steps and then give the final answer. Keep it short, one short paragraph at most. If the reasoning steps explicitly include a code solution, make sure to include it in your answer. If the user is just having a casual conversation that doesn't require explanations, answer directly without explaining your steps, otherwise make sure to summarize step by step, make sure to skip dead-ends in your reasoning and removing excess detail. 
Do not use prefixes such as Response: or Answer: when answering to the user.`, generateSettings: { max_new_tokens: 1024, }, }); finalAnswer = summary; yield { type: MessageUpdateType.Reasoning, subtype: MessageReasoningUpdateType.Status, status: `Done in ${Math.round((new Date().getTime() - startTime.getTime()) / 1000)}s.`, }; } catch (e) { finalAnswer = text; logger.error(e); } } else if (model.reasoning && model.reasoning.type === "tokens") { // make sure to remove the content of the reasoning buffer from // the final answer to avoid duplication const beginIndex = reasoningBuffer.indexOf(model.reasoning.beginToken); const endIndex = reasoningBuffer.lastIndexOf(model.reasoning.endToken); if (beginIndex !== -1 && endIndex !== -1) { // Remove the reasoning section (including tokens) from final answer finalAnswer = text.slice(0, beginIndex) + text.slice(endIndex + model.reasoning.endToken.length); } yield { type: MessageUpdateType.FinalAnswer, text: finalAnswer, interrupted, webSources: output.webSources, }; continue; } } if (model.reasoning && model.reasoning.type === "tokens") { if (output.token.text === model.reasoning.beginToken) { reasoning = true; reasoningBuffer += output.token.text; yield { type: MessageUpdateType.Reasoning, subtype: MessageReasoningUpdateType.Status, status: "Started thinking...", }; continue; } else if (output.token.text === model.reasoning.endToken) { reasoning = false; reasoningBuffer += output.token.text; yield { type: MessageUpdateType.Reasoning, subtype: MessageReasoningUpdateType.Status, status: `Done in ${Math.round((new Date().getTime() - startTime.getTime()) / 1000)}s.`, }; continue; } } // ignore special tokens if (output.token.special) continue; // pass down normal token if (reasoning) { reasoningBuffer += output.token.text; // yield status update if it has changed if (status !== "") { yield { type: MessageUpdateType.Reasoning, subtype: MessageReasoningUpdateType.Status, status, }; status = ""; } // create a new status every 5 seconds if (new Date().getTime() - lastReasoningUpdate.getTime() > 4000) { lastReasoningUpdate = new Date(); try { generateSummaryOfReasoning(reasoningBuffer).then((summary) => { status = summary; }); } catch (e) { logger.error(e); } } yield { type: MessageUpdateType.Reasoning, subtype: MessageReasoningUpdateType.Stream, token: output.token.text, }; } else { yield { type: MessageUpdateType.Stream, token: output.token.text }; } // abort check const date = AbortedGenerations.getInstance().getList().get(conv._id.toString()); if (date && date > promptedAt) break; // no output check if (!output) break; } }
chat-ui/src/lib/server/textGeneration/generate.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/generate.ts", "repo_id": "chat-ui", "token_count": 2248 }
import { env } from "$env/dynamic/private"; import { getJson, type GoogleParameters } from "serpapi"; import type { WebSearchSource } from "$lib/types/WebSearch"; import { isURL } from "$lib/utils/isUrl"; type SerpApiResponse = { organic_results: { link: string; }[]; }; export default async function searchWebSerpApi(query: string): Promise<WebSearchSource[]> { const params = { q: query, hl: "en", gl: "us", google_domain: "google.com", api_key: env.SERPAPI_KEY, } satisfies GoogleParameters; // Show result as JSON const response = (await getJson("google", params)) as unknown as SerpApiResponse; return response.organic_results.filter(({ link }) => isURL(link)); }
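A small usage sketch, assuming `SERPAPI_KEY` is set in the environment; the query string is arbitrary.

```ts
import searchWebSerpApi from "$lib/server/websearch/search/endpoints/serpApi";

const sources = await searchWebSerpApi("best open source embedding models");
for (const source of sources) {
  console.log(source.link); // organic result URLs, already filtered to valid URLs
}
```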
chat-ui/src/lib/server/websearch/search/endpoints/serpApi.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serpApi.ts", "repo_id": "chat-ui", "token_count": 236 }
export function switchTheme() { const { classList } = document.querySelector("html") as HTMLElement; const metaTheme = document.querySelector('meta[name="theme-color"]') as HTMLMetaElement; if (classList.contains("dark")) { classList.remove("dark"); metaTheme.setAttribute("content", "rgb(249, 250, 251)"); localStorage.theme = "light"; } else { classList.add("dark"); metaTheme.setAttribute("content", "rgb(26, 36, 50)"); localStorage.theme = "dark"; } }
chat-ui/src/lib/switchTheme.ts/0
{ "file_path": "chat-ui/src/lib/switchTheme.ts", "repo_id": "chat-ui", "token_count": 164 }
import { defaultModel } from "$lib/server/models";
import type { Assistant } from "./Assistant";
import type { Timestamps } from "./Timestamps";
import type { User } from "./User";

export interface Settings extends Timestamps {
	userId?: User["_id"];
	sessionId?: string;

	/**
	 * Note: Only conversations with this setting explicitly set to true should be shared.
	 *
	 * This setting is explicitly set to true when users accept the ethics modal.
	 * */
	shareConversationsWithModelAuthors: boolean;
	ethicsModalAcceptedAt: Date | null;
	activeModel: string;
	hideEmojiOnSidebar?: boolean;

	// model name and system prompts
	customPrompts?: Record<string, string>;

	assistants?: Assistant["_id"][];
	tools?: string[];

	disableStream: boolean;
	directPaste: boolean;
}

export type SettingsEditable = Omit<Settings, "ethicsModalAcceptedAt" | "createdAt" | "updatedAt">;

// TODO: move this to a constant file along with other constants
export const DEFAULT_SETTINGS = {
	shareConversationsWithModelAuthors: true,
	activeModel: defaultModel.id,
	hideEmojiOnSidebar: false,
	customPrompts: {},
	assistants: [],
	tools: [],
	disableStream: false,
	directPaste: false,
} satisfies SettingsEditable;
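A hedged sketch of how these defaults might be combined with a user's stored, partial settings; `withDefaults` is a hypothetical helper, not part of the codebase.

```ts
import { DEFAULT_SETTINGS, type SettingsEditable } from "$lib/types/Settings";

// Hypothetical helper: fill any missing fields with the defaults above.
function withDefaults(stored: Partial<SettingsEditable>): SettingsEditable {
  return {
    ...DEFAULT_SETTINGS,
    ...stored,
    // the spread replaces nested records wholesale, so merge this one explicitly
    customPrompts: { ...DEFAULT_SETTINGS.customPrompts, ...stored.customPrompts },
  };
}
```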
chat-ui/src/lib/types/Settings.ts/0
{ "file_path": "chat-ui/src/lib/types/Settings.ts", "repo_id": "chat-ui", "token_count": 369 }
export function getHref(
	url: URL | string,
	modifications: {
		newKeys?: Record<string, string | undefined | null>;
		existingKeys?: { behaviour: "delete_except" | "delete"; keys: string[] };
	}
) {
	const newUrl = new URL(url);
	const { newKeys, existingKeys } = modifications;

	// existing keys logic
	if (existingKeys) {
		const { behaviour, keys } = existingKeys;
		if (behaviour === "delete") {
			for (const key of keys) {
				newUrl.searchParams.delete(key);
			}
		} else {
			// delete_except
			const keysToPreserve = keys;
			for (const key of [...newUrl.searchParams.keys()]) {
				if (!keysToPreserve.includes(key)) {
					newUrl.searchParams.delete(key);
				}
			}
		}
	}

	// new keys logic
	if (newKeys) {
		for (const [key, val] of Object.entries(newKeys)) {
			if (val) {
				newUrl.searchParams.set(key, val);
			} else {
				newUrl.searchParams.delete(key);
			}
		}
	}

	return newUrl.toString();
}
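A quick illustration of the two modification modes; the URL and keys are made up.

```ts
import { getHref } from "$lib/utils/getHref";

const href = getHref("https://example.com/search?q=llama&page=3&utm_source=x", {
  existingKeys: { behaviour: "delete_except", keys: ["q"] }, // drop every param except "q"
  newKeys: { sort: "recent", page: undefined }, // undefined/null values are deleted
});
// -> "https://example.com/search?q=llama&sort=recent"
```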
chat-ui/src/lib/utils/getHref.ts/0
{ "file_path": "chat-ui/src/lib/utils/getHref.ts", "repo_id": "chat-ui", "token_count": 373 }
import { browser } from "$app/environment";
import { isDesktop } from "./isDesktop";

export async function share(url: string, title: string, appendLeafId: boolean = false) {
	if (!browser) return;

	// Retrieve the leafId from localStorage
	const leafId = localStorage.getItem("leafId");

	if (appendLeafId && leafId) {
		// Use URL and URLSearchParams to add the leafId parameter
		const shareUrl = new URL(url);
		shareUrl.searchParams.append("leafId", leafId);
		url = shareUrl.toString();
	}

	if (navigator.share && !isDesktop(window)) {
		navigator.share({ url, title });
	} else {
		// This is ugly, but on Chrome the clipboard write doesn't work if the window isn't focused,
		// and after we use confirm() to ask the user if they want to share, the window stays unfocused
		// for a few ms until the confirm dialog closes. `await tick()` and `window.focus()` didn't help,
		// and the bug doesn't occur in Firefox. If you can find a better fix for it, please do.
		await new Promise((resolve) => setTimeout(resolve, 250));
		await navigator.clipboard.writeText(url);
	}
}
chat-ui/src/lib/utils/share.ts/0
{ "file_path": "chat-ui/src/lib/utils/share.ts", "repo_id": "chat-ui", "token_count": 331 }
export async function GET({ locals }) { if (locals.user) { const res = { id: locals.user._id, username: locals.user.username, name: locals.user.name, email: locals.user.email, avatarUrl: locals.user.avatarUrl, hfUserId: locals.user.hfUserId, }; return Response.json(res); } return Response.json({ message: "Must be signed in" }, { status: 401 }); }
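A client-side sketch of consuming this route (ignoring any configured base path); `fetchCurrentUser` is a hypothetical helper.

```ts
async function fetchCurrentUser() {
  const res = await fetch("/api/user");
  if (res.ok) {
    return res.json(); // { id, username, name, email, avatarUrl, hfUserId }
  }
  // 401 with { message: "Must be signed in" }
  return null;
}
```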
chat-ui/src/routes/api/user/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/user/+server.ts", "repo_id": "chat-ui", "token_count": 148 }
import { authCondition } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { error } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; /** * Ideally, we'd be able to detect the client-side abort, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850 */ export async function POST({ params, locals }) { const conversationId = new ObjectId(params.id); const conversation = await collections.conversations.findOne({ _id: conversationId, ...authCondition(locals), }); if (!conversation) { error(404, "Conversation not found"); } await collections.abortedGenerations.updateOne( { conversationId }, { $set: { updatedAt: new Date() }, $setOnInsert: { createdAt: new Date() } }, { upsert: true } ); return new Response(); }
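A hedged client-side sketch of calling this route for a given conversation id (again ignoring any configured base path); `stopGenerating` is a hypothetical helper.

```ts
async function stopGenerating(conversationId: string) {
  const res = await fetch(`/conversation/${conversationId}/stop-generating`, {
    method: "POST",
  });
  if (!res.ok) {
    console.error("Could not flag the generation as aborted:", res.status);
  }
}
```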
chat-ui/src/routes/conversation/[id]/stop-generating/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/stop-generating/+server.ts", "repo_id": "chat-ui", "token_count": 260 }
<script lang="ts"> import Modal from "$lib/components/Modal.svelte"; import CarbonClose from "~icons/carbon/close"; import CarbonTrashCan from "~icons/carbon/trash-can"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import { enhance } from "$app/forms"; import { base } from "$app/paths"; import { useSettingsStore } from "$lib/stores/settings"; import Switch from "$lib/components/Switch.svelte"; import { env as envPublic } from "$env/dynamic/public"; let isConfirmingDeletion = $state(false); let settings = useSettingsStore(); </script> <div class="flex w-full flex-col gap-5"> <div class="flex flex-col items-start justify-between text-xl font-semibold text-gray-800"> <h2>Application Settings</h2> {#if !!envPublic.PUBLIC_COMMIT_SHA} <a href={`https://github.com/huggingface/chat-ui/commit/${envPublic.PUBLIC_COMMIT_SHA}`} target="_blank" rel="noreferrer" class="text-sm font-light text-gray-500" > Latest deployment <span class="gap-2 font-mono" >{envPublic.PUBLIC_COMMIT_SHA.slice(0, 7)}</span > </a> {/if} </div> <div class="flex h-full max-w-2xl flex-col gap-2 max-sm:pt-0"> {#if envPublic.PUBLIC_APP_DATA_SHARING === "1"} <label class="flex items-center"> <Switch name="shareConversationsWithModelAuthors" bind:checked={$settings.shareConversationsWithModelAuthors} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2"> Share conversations with model authors </div> </label> <p class="text-sm text-gray-500"> Sharing your data will help improve the training data and make open models better over time. </p> {/if} <label class="mt-6 flex items-center"> <Switch name="hideEmojiOnSidebar" bind:checked={$settings.hideEmojiOnSidebar} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2 font-semibold"> Hide emoticons in conversation topics <p class="text-sm font-normal text-gray-500"> Emoticons are shown in the sidebar by default, enable this to hide them. </p> </div> </label> <label class="mt-6 flex items-center"> <Switch name="disableStream" bind:checked={$settings.disableStream} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2 font-semibold"> Disable streaming tokens </div> </label> <label class="mt-6 flex items-center"> <Switch name="directPaste" bind:checked={$settings.directPaste} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2 font-semibold"> Paste text directly into chat <p class="text-sm font-normal text-gray-500"> By default, when pasting long text into the chat, we treat it as a plaintext file. Enable this to paste directly into the chat instead. 
</p> </div> </label> <div class="mt-12 flex flex-col gap-3"> <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions" target="_blank" rel="noreferrer" class="flex items-center underline decoration-gray-300 underline-offset-2 hover:decoration-gray-700" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-sm " /> Share your feedback on HuggingChat</a > <button onclick={(e) => { e.preventDefault(); isConfirmingDeletion = true; }} type="submit" class="flex items-center underline decoration-gray-300 underline-offset-2 hover:decoration-gray-700" ><CarbonTrashCan class="mr-2 inline text-sm text-red-500" />Delete all conversations</button > </div> </div> {#if isConfirmingDeletion} <Modal on:close={() => (isConfirmingDeletion = false)}> <form use:enhance={() => { isConfirmingDeletion = false; }} method="post" action="{base}/conversations?/delete" class="flex w-full flex-col gap-5 p-6" > <div class="flex items-start justify-between text-xl font-semibold text-gray-800"> <h2>Are you sure?</h2> <button type="button" class="group" onclick={(e) => { e.stopPropagation(); isConfirmingDeletion = false; }} > <CarbonClose class="text-gray-900 group-hover:text-gray-500" /> </button> </div> <p class="text-gray-800"> This action will delete all your conversations. This cannot be undone. </p> <button type="submit" class="mt-2 rounded-full bg-red-700 px-5 py-2 text-lg font-semibold text-gray-100 ring-gray-400 ring-offset-1 transition-all hover:ring focus-visible:outline-none focus-visible:ring" > Confirm deletion </button> </form> </Modal> {/if} </div>
chat-ui/src/routes/settings/(nav)/+page.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/+page.svelte", "repo_id": "chat-ui", "token_count": 1841 }
import { base } from "$app/paths"; import { redirect } from "@sveltejs/kit"; export async function load({ parent }) { const { enableCommunityTools } = await parent(); if (enableCommunityTools) { return {}; } redirect(302, `${base}/`); }
chat-ui/src/routes/tools/+layout.ts/0
{ "file_path": "chat-ui/src/routes/tools/+layout.ts", "repo_id": "chat-ui", "token_count": 82 }
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": { "type": "line" }, "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "quantitative", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "quantitative", "title": "<DVC_METRIC_Y_LABEL>", "scale": { "zero": false } }, "color": { "field": "rev", "type": "nominal" } } }
datasets/.dvc/plots/default.json/0
{ "file_path": "datasets/.dvc/plots/default.json", "repo_id": "datasets", "token_count": 419 }
cff-version: 1.2.0 message: "If you use this software, please cite it as below." title: "huggingface/datasets" authors: - family-names: Lhoest given-names: Quentin - family-names: Villanova del Moral given-names: Albert orcid: "https://orcid.org/0000-0003-1727-1045" - family-names: von Platen given-names: Patrick - family-names: Wolf given-names: Thomas - family-names: Šaško given-names: Mario - family-names: Jernite given-names: Yacine - family-names: Thakur given-names: Abhishek - family-names: Tunstall given-names: Lewis - family-names: Patil given-names: Suraj - family-names: Drame given-names: Mariama - family-names: Chaumond given-names: Julien - family-names: Plu given-names: Julien - family-names: Davison given-names: Joe - family-names: Brandeis given-names: Simon - family-names: Sanh given-names: Victor - family-names: Le Scao given-names: Teven - family-names: Canwen Xu given-names: Kevin - family-names: Patry given-names: Nicolas - family-names: Liu given-names: Steven - family-names: McMillan-Major given-names: Angelina - family-names: Schmid given-names: Philipp - family-names: Gugger given-names: Sylvain - family-names: Raw given-names: Nathan - family-names: Lesage given-names: Sylvain - family-names: Lozhkov given-names: Anton - family-names: Carrigan given-names: Matthew - family-names: Matussière given-names: Théo - family-names: von Werra given-names: Leandro - family-names: Debut given-names: Lysandre - family-names: Bekman given-names: Stas - family-names: Delangue given-names: Clément doi: 10.5281/zenodo.4817768 repository-code: "https://github.com/huggingface/datasets" license: Apache-2.0 preferred-citation: type: conference-paper title: "Datasets: A Community Library for Natural Language Processing" authors: - family-names: Lhoest given-names: Quentin - family-names: Villanova del Moral given-names: Albert orcid: "https://orcid.org/0000-0003-1727-1045" - family-names: von Platen given-names: Patrick - family-names: Wolf given-names: Thomas - family-names: Šaško given-names: Mario - family-names: Jernite given-names: Yacine - family-names: Thakur given-names: Abhishek - family-names: Tunstall given-names: Lewis - family-names: Patil given-names: Suraj - family-names: Drame given-names: Mariama - family-names: Chaumond given-names: Julien - family-names: Plu given-names: Julien - family-names: Davison given-names: Joe - family-names: Brandeis given-names: Simon - family-names: Sanh given-names: Victor - family-names: Le Scao given-names: Teven - family-names: Canwen Xu given-names: Kevin - family-names: Patry given-names: Nicolas - family-names: Liu given-names: Steven - family-names: McMillan-Major given-names: Angelina - family-names: Schmid given-names: Philipp - family-names: Gugger given-names: Sylvain - family-names: Raw given-names: Nathan - family-names: Lesage given-names: Sylvain - family-names: Lozhkov given-names: Anton - family-names: Carrigan given-names: Matthew - family-names: Matussière given-names: Théo - family-names: von Werra given-names: Leandro - family-names: Debut given-names: Lysandre - family-names: Bekman given-names: Stas - family-names: Delangue given-names: Clément collection-title: "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations" collection-type: proceedings month: 11 year: 2021 publisher: name: "Association for Computational Linguistics" url: "https://aclanthology.org/2021.emnlp-demo.21" start: 175 end: 184 identifiers: - type: other value: "arXiv:2109.02846" description: 
"The arXiv preprint of the paper"
datasets/CITATION.cff/0
{ "file_path": "datasets/CITATION.cff", "repo_id": "datasets", "token_count": 1428 }
# Load audio data

You can load an audio dataset using the [`Audio`] feature that automatically decodes and resamples the audio files when you access the examples. Audio decoding is based on the [`soundfile`](https://github.com/bastibe/python-soundfile) Python package, which uses the [`libsndfile`](https://github.com/libsndfile/libsndfile) C library under the hood.

## Installation

To work with audio datasets, you need to have the `audio` dependencies installed. Check out the [installation](./installation#audio) guide to learn how to install it.

## Local files

You can load your own dataset using the paths to your audio files. Use the [`~Dataset.cast_column`] function to take a column of audio file paths, and cast it to the [`Audio`] feature:

```py
>>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
>>> audio_dataset[0]["audio"]
{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,  0.        ,  0.        ], dtype=float32),
 'path': 'path/to/audio_1',
 'sampling_rate': 16000}
```

## AudioFolder

You can also load a dataset with an `AudioFolder` dataset builder. It does not require writing a custom dataloader, making it useful for quickly creating and loading audio datasets with several thousand audio files.

## AudioFolder with metadata

To link your audio files with metadata information, make sure your dataset has a `metadata.csv` file. Your dataset structure might look like:

```
folder/train/metadata.csv
folder/train/first_audio_file.mp3
folder/train/second_audio_file.mp3
folder/train/third_audio_file.mp3
```

Your `metadata.csv` file must have a `file_name` column which links audio files with their metadata. An example `metadata.csv` file might look like:

```text
file_name,transcription
first_audio_file.mp3,znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi
second_audio_file.mp3,już u źwierzyńca podwojów król zasiada przy nim książęta i panowie rada a gdzie wzniosły krążył ganek rycerze obok kochanek król skinął palcem zaczęto igrzysko
third_audio_file.mp3,pewnie kędyś w obłędzie ubite minęły szlaki zaczekajmy dzień jaki poślemy szukać wszędzie dziś jutro pewnie będzie posłali wszędzie sługi czekali dzień i drugi gdy nic nie doczekali z płaczem chcą jechać dali
```

`AudioFolder` will load audio data and create a `transcription` column containing texts from `metadata.csv`:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")

>>> # OR by specifying the list of files
>>> dataset = load_dataset("audiofolder", data_files=["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"])
```

You can load remote datasets from their URLs with the `data_files` parameter:

```py
>>> dataset = load_dataset("audiofolder", data_files=["https://foo.bar/audio_1", "https://foo.bar/audio_2", ..., "https://foo.bar/audio_n"])
>>> # for example, pass SpeechCommands archive:
>>> dataset = load_dataset("audiofolder", data_files="https://s3.amazonaws.com/datasets.huggingface.co/SpeechCommands/v0.01/v0.01_test.tar.gz")
```

Metadata can also be specified as JSON Lines, in which case use `metadata.jsonl` as the name of the metadata file. This format is helpful in scenarios when one of the columns is complex, e.g. a list of floats, to avoid parsing errors or reading the complex values as strings.
To ignore the information in the metadata file, set `drop_metadata=True` in [`load_dataset`]: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder", drop_metadata=True) ``` If you don't have a metadata file, `AudioFolder` automatically infers the label name from the directory name. If you want to drop automatically created labels, set `drop_labels=True`. In this case, your dataset will only contain an audio column: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder_without_metadata", drop_labels=True) ``` <Tip> For more information about creating your own `AudioFolder` dataset, take a look at the [Create an audio dataset](./audio_dataset) guide. </Tip> For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.
datasets/docs/source/audio_load.mdx/0
{ "file_path": "datasets/docs/source/audio_load.mdx", "repo_id": "datasets", "token_count": 1529 }
# Semantic segmentation

Semantic segmentation datasets are used to train a model to classify every pixel in an image. These datasets enable a wide variety of applications, such as background removal from images, stylizing images, or scene understanding for autonomous driving.

This guide will show you how to apply transformations to an image segmentation dataset.

Before you start, make sure you have up-to-date versions of `albumentations` and `opencv-python` installed:

```bash
pip install -U albumentations opencv-python
```

[Albumentations](https://albumentations.ai/) is a Python library for performing data augmentation for computer vision. It supports various computer vision tasks such as image classification, object detection, segmentation, and keypoint estimation.

This guide uses the [Scene Parsing](https://huggingface.co/datasets/scene_parse_150) dataset for segmenting and parsing an image into different image regions associated with semantic categories, such as sky, road, person, and bed.

Load the `train` split of the dataset and take a look at an example:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("scene_parse_150", split="train")
>>> index = 10
>>> dataset[index]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=683x512 at 0x7FB37B0EC810>,
 'annotation': <PIL.PngImagePlugin.PngImageFile image mode=L size=683x512 at 0x7FB37B0EC9D0>,
 'scene_category': 927}
```

The dataset has three fields:

* `image`: a PIL image object.
* `annotation`: segmentation mask of the image.
* `scene_category`: the label or scene category of the image (like “kitchen” or “office”).

Next, check out an image with:

```py
>>> dataset[index]["image"]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/image_seg.png">
</div>

Similarly, you can check out the respective segmentation mask:

```py
>>> dataset[index]["annotation"]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/seg_mask.png">
</div>

We can also add a [color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51) on the segmentation mask and overlay it on top of the original image to visualize the dataset. After defining the color palette (for example, by copying the `create_ade20k_label_colormap` function from the linked script; a quick stand-in palette is also sketched at the end of this guide), you should be ready to visualize some overlays.

```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt

>>> def visualize_seg_mask(image: np.ndarray, mask: np.ndarray):
...     color_seg = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
...     palette = np.array(create_ade20k_label_colormap())
...     for label, color in enumerate(palette):
...         color_seg[mask == label, :] = color
...     color_seg = color_seg[..., ::-1]  # convert to BGR
...     img = np.array(image) * 0.5 + color_seg * 0.5  # plot the image with the segmentation map
...     img = img.astype(np.uint8)
...     plt.figure(figsize=(15, 10))
...     plt.imshow(img)
...     plt.axis("off")
...     plt.show()

>>> visualize_seg_mask(
...     np.array(dataset[index]["image"]),
...     np.array(dataset[index]["annotation"])
... )
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/seg_overlay.png">
</div>

Now apply some augmentations with `albumentations`. You’ll first resize the image and adjust its brightness.

```py
>>> import albumentations

>>> transform = albumentations.Compose(
...     [
...         albumentations.Resize(256, 256),
...         albumentations.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.5),
...     ]
... )
```

Create a function to apply the transformation to the images:

```py
>>> def transforms(examples):
...     transformed_images, transformed_masks = [], []
...
...     for image, seg_mask in zip(examples["image"], examples["annotation"]):
...         image, seg_mask = np.array(image), np.array(seg_mask)
...         transformed = transform(image=image, mask=seg_mask)
...         transformed_images.append(transformed["image"])
...         transformed_masks.append(transformed["mask"])
...
...     examples["pixel_values"] = transformed_images
...     examples["label"] = transformed_masks
...     return examples
```

Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space:

```py
>>> dataset.set_transform(transforms)
```

You can verify the transformation worked by indexing into the `pixel_values` and `label` of an example:

```py
>>> image = np.array(dataset[index]["pixel_values"])
>>> mask = np.array(dataset[index]["label"])
>>> visualize_seg_mask(image, mask)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/albumentations_seg.png">
</div>

In this guide, you have used `albumentations` for augmenting the dataset. It's also possible to use `torchvision` to apply similar transforms.

```py
>>> from torchvision.transforms import Resize, ColorJitter, Compose

>>> transformation_chain = Compose([
...     Resize((256, 256)),
...     ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
... ])
>>> resize = Resize((256, 256))

>>> def train_transforms(example_batch):
...     example_batch["pixel_values"] = [transformation_chain(x) for x in example_batch["image"]]
...     example_batch["label"] = [resize(x) for x in example_batch["annotation"]]
...     return example_batch

>>> dataset.set_transform(train_transforms)

>>> image = np.array(dataset[index]["pixel_values"])
>>> mask = np.array(dataset[index]["label"])
>>> visualize_seg_mask(image, mask)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/torchvision_seg.png">
</div>

<Tip>

Now that you know how to process a dataset for semantic segmentation, learn
[how to train a semantic segmentation model](https://huggingface.co/docs/transformers/tasks/semantic_segmentation)
and use it for inference.

</Tip>
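As a closing note, the overlay helper in this guide calls a `create_ade20k_label_colormap` function taken from the colormap script linked above. If you only need a quick visual check and don't want to copy that file, a stand-in palette with reproducible random colors works too. This is only a sketch under the assumption of the 150 ADE20K classes plus a background index; it is not the official colormap:

```py
>>> import numpy as np

>>> def create_ade20k_label_colormap(num_labels=151, seed=0):
...     # Random but reproducible RGB colors, one row per label index.
...     rng = np.random.default_rng(seed)
...     return rng.integers(0, 256, size=(num_labels, 3), dtype=np.uint8)
```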
datasets/docs/source/semantic_segmentation.mdx/0
{ "file_path": "datasets/docs/source/semantic_segmentation.mdx", "repo_id": "datasets", "token_count": 2142 }
# Create a video dataset This guide will show you how to create a video dataset with `VideoFolder` and some metadata. This is a no-code solution for quickly creating a video dataset with several thousand videos. <Tip> You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub. </Tip> ## VideoFolder The `VideoFolder` is a dataset builder designed to quickly load a video dataset with several thousand videos without requiring you to write any code. <Tip> 💡 Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `VideoFolder` creates dataset splits based on your dataset repository structure. </Tip> `VideoFolder` automatically infers the class labels of your dataset based on the directory name. Store your dataset in a directory structure like: ``` folder/train/dog/golden_retriever.mp4 folder/train/dog/german_shepherd.mp4 folder/train/dog/chihuahua.mp4 folder/train/cat/maine_coon.mp4 folder/train/cat/bengal.mp4 folder/train/cat/birman.mp4 ``` Then users can load your dataset by specifying `videofolder` in [`load_dataset`] and the directory in `data_dir`: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("videofolder", data_dir="/path/to/folder") ``` You can also use `videofolder` to load datasets involving multiple splits. To do so, your dataset directory should have the following structure: ``` folder/train/dog/golden_retriever.mp4 folder/train/cat/maine_coon.mp4 folder/test/dog/german_shepherd.mp4 folder/test/cat/bengal.mp4 ``` <Tip warning={true}> If all video files are contained in a single directory or if they are not on the same level of directory structure, `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly. </Tip> If there is additional information you'd like to include about your dataset, like text captions or bounding boxes, add it as a `metadata.csv` file in your folder. This lets you quickly create datasets for different computer vision tasks like text captioning or object detection. You can also use a JSONL file `metadata.jsonl`. ``` folder/train/metadata.csv folder/train/0001.mp4 folder/train/0002.mp4 folder/train/0003.mp4 ``` You can also zip your videos: ``` folder/metadata.csv folder/train.zip folder/test.zip folder/valid.zip ``` Your `metadata.csv` file must have a `file_name` column which links video files with their metadata: ```csv file_name,additional_feature 0001.mp4,This is a first value of a text feature you added to your videos 0002.mp4,This is a second value of a text feature you added to your videos 0003.mp4,This is a third value of a text feature you added to your videos ``` or using `metadata.jsonl`: ```jsonl {"file_name": "0001.mp4", "additional_feature": "This is a first value of a text feature you added to your videos"} {"file_name": "0002.mp4", "additional_feature": "This is a second value of a text feature you added to your videos"} {"file_name": "0003.mp4", "additional_feature": "This is a third value of a text feature you added to your videos"} ``` <Tip> If metadata files are present, the inferred labels based on the directory name are dropped by default. To include those labels, set `drop_labels=False` in `load_dataset`. </Tip> ### Video captioning Video captioning datasets have text describing a video. 
An example `metadata.csv` may look like:

```csv
file_name,text
0001.mp4,This is a golden retriever playing with a ball
0002.mp4,A german shepherd
0003.mp4,One chihuahua
```

Load the dataset with `VideoFolder`, and it will create a `text` column for the video captions:

```py
>>> dataset = load_dataset("videofolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]["text"]
"This is a golden retriever playing with a ball"
```

### Upload dataset to the Hub

Once you've created a dataset, you can share it to the Hub using `huggingface_hub`, for example. Make sure you have the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library installed and you're logged in to your Hugging Face account (see the [Upload with Python tutorial](upload_dataset#upload-with-python) for more details).

Upload your dataset with `huggingface_hub.HfApi.upload_folder`:

```py
from huggingface_hub import HfApi
api = HfApi()

api.upload_folder(
    folder_path="/path/to/local/dataset",
    repo_id="username/my-cool-dataset",
    repo_type="dataset",
)
```

## WebDataset

The [WebDataset](https://github.com/webdataset/webdataset) format is based on TAR archives and is suitable for big video datasets.
You can group your videos in TAR archives (e.g. 1GB of videos per TAR archive) and have thousands of TAR archives:

```
folder/train/00000.tar
folder/train/00001.tar
folder/train/00002.tar
...
```

In the archives, each example is made of files sharing the same prefix:

```
e39871fd9fd74f55.mp4
e39871fd9fd74f55.json
f18b91585c4d3f3e.mp4
f18b91585c4d3f3e.json
ede6e66b2fb59aab.mp4
ede6e66b2fb59aab.json
ed600d57fcee4f94.mp4
ed600d57fcee4f94.json
...
```

You can provide your videos' labels, captions, or features as JSON or text files, for example.

For more details on the WebDataset format and the Python library, please check the [WebDataset documentation](https://webdataset.github.io/webdataset).

Load your WebDataset and it will create one column per file suffix (here "mp4" and "json"):

```python
>>> from datasets import load_dataset

>>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", split="train")
>>> dataset[0]["json"]
{"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}
```
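As a rough sketch of how such TAR shards could be produced with the Python standard library (the sample keys, local video paths, and JSON fields below are assumptions for illustration; the `webdataset` library also provides its own writer utilities):

```python
>>> import io, json, tarfile

>>> samples = [
...     ("e39871fd9fd74f55", "local_videos/clip_0.mp4", {"categories": [0]}),
...     ("f18b91585c4d3f3e", "local_videos/clip_1.mp4", {"categories": [2]}),
... ]
>>> with tarfile.open("folder/train/00000.tar", "w") as tar:
...     for key, video_path, meta in samples:
...         # video file keeps its key-based name inside the archive
...         tar.add(video_path, arcname=f"{key}.mp4")
...         # matching metadata file shares the same prefix
...         payload = json.dumps(meta).encode("utf-8")
...         info = tarfile.TarInfo(name=f"{key}.json")
...         info.size = len(payload)
...         tar.addfile(info, io.BytesIO(payload))
```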
datasets/docs/source/video_dataset.mdx/0
{ "file_path": "datasets/docs/source/video_dataset.mdx", "repo_id": "datasets", "token_count": 1813 }
import platform from argparse import ArgumentParser import fsspec import huggingface_hub import pandas import pyarrow from datasets import __version__ as version from datasets.commands import BaseDatasetsCLICommand def info_command_factory(_): return EnvironmentCommand() class EnvironmentCommand(BaseDatasetsCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser("env", help="Print relevant system environment info.") download_parser.set_defaults(func=info_command_factory) def run(self): info = { "`datasets` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "`huggingface_hub` version": huggingface_hub.__version__, "PyArrow version": pyarrow.__version__, "Pandas version": pandas.__version__, "`fsspec` version": fsspec.__version__, } print("\nCopy-and-paste the text below in your GitHub issue.\n") print(self.format_dict(info)) return info @staticmethod def format_dict(d): return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
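
# --- Usage sketch (added for illustration; not part of the original module) ---
# Running this file directly prints the same report that the `datasets-cli env`
# subcommand produces; `run()` also returns the info dict for programmatic use.
if __name__ == "__main__":
    EnvironmentCommand().run()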
datasets/src/datasets/commands/env.py/0
{ "file_path": "datasets/src/datasets/commands/env.py", "repo_id": "datasets", "token_count": 476 }
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..table import array_cast from ..utils.file_utils import is_local_path, xopen from ..utils.py_utils import string_to_dict if TYPE_CHECKING: from decord import VideoReader from .features import FeatureType @dataclass class Video: """ **Experimental.** Video [`Feature`] to read video data from a video file. Input: The Video feature accepts as input: - A `str`: Absolute path to the video file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the video file in a dataset repository. - `bytes`: Bytes of the video file. This is useful for archived files with sequential access. - A `decord.VideoReader`: decord video reader object. Args: mode (`str`, *optional*): The mode to convert the video to. If `None`, the native mode of the video is used. decode (`bool`, defaults to `True`): Whether to decode the video data. If `False`, returns the underlying dictionary in the format `{"path": video_path, "bytes": video_bytes}`. Examples: ```py >>> from datasets import Dataset, Video >>> ds = Dataset.from_dict({"video":["path/to/Screen Recording.mov"]}).cast_column("video", Video()) >>> ds.features["video"] Video(decode=True, id=None) >>> ds[0]["video"] <decord.video_reader.VideoReader at 0x105525c70> >>> ds = ds.cast_column('video', Video(decode=False)) {'bytes': None, 'path': 'path/to/Screen Recording.mov'} ``` """ decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "decord.VideoReader" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Video", init=False, repr=False) def __post_init__(self): if config.DECORD_AVAILABLE: patch_decord() def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "VideoReader"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `np.ndarray`, `VideoReader` or `dict`): Data passed as input to Video feature. Returns: `dict` with "path" and "bytes" fields """ if config.DECORD_AVAILABLE: from decord import VideoReader else: VideoReader = None if isinstance(value, list): value = np.array(value) if isinstance(value, str): return {"path": value, "bytes": None} elif isinstance(value, bytes): return {"path": None, "bytes": value} elif isinstance(value, np.ndarray): # convert the video array to bytes return encode_np_array(value) elif VideoReader and isinstance(value, VideoReader): # convert the decord video reader to bytes return encode_decord_video(value) elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the video bytes, and path is used to infer the video format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"A video sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example(self, value: dict, token_per_repo_id=None) -> "VideoReader": """Decode example video file into video data. 
Args: value (`str` or `dict`): A string with the absolute video file path, a dictionary with keys: - `path`: String with absolute or relative video file path. - `bytes`: The bytes of the video file. token_per_repo_id (`dict`, *optional*): To access and decode video files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). Returns: `decord.VideoReader` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Video(decode=True) instead.") if config.DECORD_AVAILABLE: from decord import VideoReader else: raise ImportError("To support decoding videos, please install 'decord'.") if token_per_repo_id is None: token_per_repo_id = {} path, bytes_ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f"A video should have one of 'path' or 'bytes' but both are None in {value}.") else: if is_local_path(path): video = VideoReader(path) else: source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id.get(repo_id) except ValueError: token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: bytes_ = BytesIO(f.read()) video = VideoReader(bytes_) else: video = VideoReader(BytesIO(bytes_)) return video def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.""" from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } ) def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: """Cast an Arrow array to the Video arrow storage type. The Arrow types that can be converted to the Video pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the video bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter - `pa.list(*)` - it must contain the video array data Args: storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Video arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_list(storage.type): bytes_array = pa.array( [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays( [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(storage, self.pa_type) def video_to_bytes(video: "VideoReader") -> bytes: """Convert a decord Video object to bytes using native compression if possible""" raise NotImplementedError() def encode_decord_video(video: "VideoReader") -> dict: if hasattr(video, "_hf_encoded"): return video._hf_encoded else: raise NotImplementedError( "Encoding a decord video is not implemented. " "Please call `datasets.features.video.patch_decord()` before loading videos to enable this." ) def encode_np_array(array: np.ndarray) -> dict: raise NotImplementedError() # Patching decord a little bit to: # 1. store the encoded video data {"path": ..., "bytes": ...} in `video._hf_encoded`` # 2. set the decord bridge to numpy/torch/tf/jax using `video._hf_bridge_out` (per video instance) instead of decord.bridge.bridge_out (global) # This doesn't affect the normal usage of decord. def _patched_init(self: "VideoReader", uri: Union[str, BytesIO], *args, **kwargs) -> None: from decord.bridge import bridge_out if hasattr(uri, "read"): self._hf_encoded = {"bytes": uri.read(), "path": None} uri.seek(0) elif isinstance(uri, str): self._hf_encoded = {"bytes": None, "path": uri} self._hf_bridge_out = bridge_out self._original_init(uri, *args, **kwargs) def _patched_next(self: "VideoReader", *args, **kwargs): return self._hf_bridge_out(self._original_next(*args, **kwargs)) def _patched_get_batch(self: "VideoReader", *args, **kwargs): return self._hf_bridge_out(self._original_get_batch(*args, **kwargs)) def patch_decord(): # We need to import torch first, otherwise later it can cause issues # e.g. "RuntimeError: random_device could not be read" # when running `torch.tensor(value).share_memory_()` # Same for duckdb which crashes on import if config.TORCH_AVAILABLE: import torch # noqa if config.DUCKDB_AVAILABLE: import duckdb # noqa import decord.video_reader from decord import VideoReader if not hasattr(VideoReader, "_hf_patched"): decord.video_reader.bridge_out = lambda x: x VideoReader._original_init = VideoReader.__init__ VideoReader.__init__ = _patched_init VideoReader._original_next = VideoReader.next VideoReader.next = _patched_next VideoReader._original_get_batch = VideoReader.get_batch VideoReader.get_batch = _patched_get_batch VideoReader._hf_patched = True
datasets/src/datasets/features/video.py/0
{ "file_path": "datasets/src/datasets/features/video.py", "repo_id": "datasets", "token_count": 5033 }
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.csv.csv import Csv from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class CsvDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Csv( cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class CsvDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_csv_kwargs, ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0.") self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE self.num_proc = num_proc self.encoding = "utf-8" self.storage_options = storage_options or {} self.to_csv_kwargs = to_csv_kwargs def write(self) -> int: _ = self.to_csv_kwargs.pop("path_or_buf", None) header = self.to_csv_kwargs.pop("header", True) index = self.to_csv_kwargs.pop("index", False) if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer: written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs) else: written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs) return written def _batch_csv(self, args): offset, header, index, to_csv_kwargs = args batch = query_table( table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, ) csv_str = batch.to_pandas().to_csv( path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs ) return csv_str.encode(self.encoding) def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int: """Writes the pyarrow table as CSV to a binary file handle. Caller is responsible for opening and closing the handle. 
""" written = 0 if self.num_proc is None or self.num_proc == 1: for offset in hf_tqdm( range(0, len(self.dataset), self.batch_size), unit="ba", desc="Creating CSV from Arrow format", ): csv_str = self._batch_csv((offset, header, index, to_csv_kwargs)) written += file_obj.write(csv_str) else: num_rows, batch_size = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for csv_str in hf_tqdm( pool.imap( self._batch_csv, [(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", desc="Creating CSV from Arrow format", ): written += file_obj.write(csv_str) return written
datasets/src/datasets/io/csv.py/0
{ "file_path": "datasets/src/datasets/io/csv.py", "repo_id": "datasets", "token_count": 2556 }