Dataset columns:
- code: string, lengths 87 to 55.2k
- code_codestyle: int64, values 0 to 349
- style_context: string, lengths 135 to 49.1k
- style_context_codestyle: int64, values 0 to 349
- label: int64, values 0 to 1
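The rows below are raw cells in this schema: each record pairs a `code` sample with a `style_context` sample, their integer code-style IDs, and a binary `label`. As a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library (the repository id `user/code-style-pairs` is a placeholder, not the actual dataset path):

```python
from datasets import load_dataset

# Placeholder repository id; substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

# Each record follows the schema above: two code strings, two style ids, one binary label.
record = ds[0]
print(record["code_codestyle"], record["style_context_codestyle"], record["label"])
print(record["code"][:200])           # first characters of the code cell
print(record["style_context"][:200])  # first characters of the style-context cell
```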
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
1
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
330
from __future__ import annotations from typing import Generic, TypeVar a_ = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = data __lowerCamelCase = self __lowerCamelCase = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # map from node name to the node object __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # create a new set with x as its member __lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # find the set x belongs to (with path-compression) __lowerCamelCase = self.map[data] if elem_ref != elem_ref.parent: __lowerCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __lowerCamelCase = nodea else: __lowerCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # add a node ONLY if its not present in the graph if node not in self.connections: __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # add an edge with the given weight self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) __lowerCamelCase = weight __lowerCamelCase = weight def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __UpperCAmelCase : x[2] ) # creating the disjoint set __lowerCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__UpperCAmelCase ) # MST generation __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index] index += 1 __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase ) return graph
330
1
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } __lowerCamelCase = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) __lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) 
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(__UpperCAmelCase , return_tensors='''np''' ) __lowerCamelCase = processor(images=__UpperCAmelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=__UpperCAmelCase ) __lowerCamelCase = tokenizer(__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(__UpperCAmelCase ): processor() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
330
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
1
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
330
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer a_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } a_ = { """unc-nlp/lxmert-base-uncased""": 512, } a_ = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = LxmertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**__UpperCAmelCase ) __lowerCamelCase = do_lower_case def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __lowerCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
330
from string import ascii_lowercase, ascii_uppercase def a__ ( _UpperCamelCase : str ): if not sentence: return "" __lowerCamelCase = dict(zip(_UpperCamelCase ,_UpperCamelCase ) ) return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
330
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class __lowerCAmelCase ( lowerCAmelCase__ ):
    @slow
    @require_torch
    def lowerCamelCase ( self ):
        '''simple docstring'''
        __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        __lowerCamelCase = bertabert.config.encoder.vocab_size
        __lowerCamelCase = tokenizer.sep_token_id
        __lowerCamelCase = tokenizer.cls_token_id
        __lowerCamelCase = 128
        __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
        __lowerCamelCase = train_dataset.select(range(32 ) )
        __lowerCamelCase = val_dataset.select(range(16 ) )
        __lowerCamelCase = 4

        def _map_to_encoder_decoder_inputs(__UpperCAmelCase ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 )
            __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 )
            __lowerCamelCase = inputs.input_ids
            __lowerCamelCase = inputs.attention_mask
            __lowerCamelCase = outputs.input_ids
            __lowerCamelCase = outputs.input_ids.copy()
            __lowerCamelCase = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            __lowerCamelCase = outputs.attention_mask

            assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids )
            assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids )

            return batch

        def _compute_metrics(__UpperCAmelCase ):
            __lowerCamelCase = pred.label_ids
            __lowerCamelCase = pred.predictions

            # all unnecessary tokens are removed
            __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
            __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
            __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase )

            return {"accuracy": accuracy}

        # map train dataset
        __lowerCamelCase = train_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=__UpperCAmelCase ,
            batch_size=__UpperCAmelCase ,
            remove_columns=['''article''', '''highlights'''] ,
        )
        train_dataset.set_format(
            type='''torch''' ,
            columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] ,
        )

        # same for validation dataset
        __lowerCamelCase = val_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=__UpperCAmelCase ,
            batch_size=__UpperCAmelCase ,
            remove_columns=['''article''', '''highlights'''] ,
        )
        val_dataset.set_format(
            type='''torch''' ,
            columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] ,
        )

        __lowerCamelCase = self.get_auto_remove_tmp_dir()
        __lowerCamelCase = SeqaSeqTrainingArguments(
            output_dir=__UpperCAmelCase ,
            per_device_train_batch_size=__UpperCAmelCase ,
            per_device_eval_batch_size=__UpperCAmelCase ,
            predict_with_generate=__UpperCAmelCase ,
            evaluation_strategy='''steps''' ,
            do_train=__UpperCAmelCase ,
            do_eval=__UpperCAmelCase ,
            warmup_steps=0 ,
            eval_steps=2 ,
            logging_steps=2 ,
        )

        # instantiate trainer
        __lowerCamelCase = SeqaSeqTrainer(
            model=__UpperCAmelCase ,
            args=__UpperCAmelCase ,
            compute_metrics=_compute_metrics ,
            train_dataset=__UpperCAmelCase ,
            eval_dataset=__UpperCAmelCase ,
            tokenizer=__UpperCAmelCase ,
        )

        # start training
        trainer.train()
330
1
import unittest

from knapsack import greedy_knapsack as kp


class __lowerCAmelCase ( unittest.TestCase ):
    def lowerCamelCase ( self ):
        '''simple docstring'''
        __lowerCamelCase = [10, 20, 30, 40, 50, 60]
        __lowerCamelCase = [2, 4, 6, 8, 10, 12]
        __lowerCamelCase = 100
        self.assertEqual(kp.calc_profit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , 210 )

    def lowerCamelCase ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(__UpperCAmelCase , '''max_weight must greater than zero.''' )

    def lowerCamelCase ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(__UpperCAmelCase , '''Weight can not be negative.''' )

    def lowerCamelCase ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(__UpperCAmelCase , '''Profit can not be negative.''' )

    def lowerCamelCase ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(__UpperCAmelCase , '''max_weight must greater than zero.''' )

    def lowerCamelCase ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(
            __UpperCAmelCase , '''The length of profit and weight must be same.''' )


if __name__ == "__main__":
    unittest.main()
330
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ["""TimmBackbone"""]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


a_ = """\
Text data.
Second line of data."""

a_ = """file"""


@pytest.fixture(scope='''session''' )
def a__ ( _UpperCamelCase : str ):
    __lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    __lowerCamelCase = bytes(_UpperCamelCase ,'''utf-8''' )
    with zstd.open(_UpperCamelCase ,'''wb''' ) as f:
        f.write(_UpperCamelCase )
    return path


@pytest.fixture
def a__ ( _UpperCamelCase : Dict ):
    with open(os.path.join(tmpfs.local_root_dir ,_UpperCamelCase ) ,'''w''' ) as f:
        f.write(_UpperCamelCase )
    return FILE_PATH


@pytest.mark.parametrize('''compression_format''' ,['''gzip''', '''xz''', '''zstd'''] )
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : int ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple ):
    __lowerCamelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    __lowerCamelCase = input_paths[compression_format]
    __lowerCamelCase = tmp_path / '''cache'''
    __lowerCamelCase = DownloadConfig(cache_dir=_UpperCamelCase ,extract_compressed_file=_UpperCamelCase )
    __lowerCamelCase = cached_path(_UpperCamelCase ,download_config=_UpperCamelCase )
    with open(_UpperCamelCase ) as f:
        __lowerCamelCase = f.read()
    with open(_UpperCamelCase ) as f:
        __lowerCamelCase = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize('''default_extracted''' ,[True, False] )
@pytest.mark.parametrize('''default_cache_dir''' ,[True, False] )
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : int ,_UpperCamelCase : List[str] ,_UpperCamelCase : str ):
    __lowerCamelCase = '''custom_cache'''
    __lowerCamelCase = '''custom_extracted_dir'''
    __lowerCamelCase = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        __lowerCamelCase = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' ,_UpperCamelCase )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' ,str(_UpperCamelCase ) )
        __lowerCamelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    __lowerCamelCase = xz_file
    __lowerCamelCase = (
        DownloadConfig(extract_compressed_file=_UpperCamelCase )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=_UpperCamelCase )
    )
    __lowerCamelCase = cached_path(_UpperCamelCase ,download_config=_UpperCamelCase )
    assert Path(_UpperCamelCase ).parent.parts[-2:] == expected


def a__ ( _UpperCamelCase : Tuple ):
    # absolute path
    __lowerCamelCase = str(Path(_UpperCamelCase ).resolve() )
    assert cached_path(_UpperCamelCase ) == text_file
    # relative path
    __lowerCamelCase = str(Path(_UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(_UpperCamelCase ) == text_file


def a__ ( _UpperCamelCase : Union[str, Any] ):
    # absolute path
    __lowerCamelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(_UpperCamelCase ):
        cached_path(_UpperCamelCase )
    # relative path
    __lowerCamelCase = '''./__missing_file__.txt'''
    with pytest.raises(_UpperCamelCase ):
        cached_path(_UpperCamelCase )


def a__ ( _UpperCamelCase : Dict ):
    __lowerCamelCase = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(_UpperCamelCase ) as f:
        __lowerCamelCase = f.read()
    assert output_file_content == FILE_CONTENT


@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_UpperCamelCase )
def a__ ( ):
    with pytest.raises(_UpperCamelCase ):
        cached_path('''https://huggingface.co''' )


@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_UpperCamelCase )
def a__ ( _UpperCamelCase : Optional[int] ):
    __lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(_UpperCamelCase ):
        http_get('''https://huggingface.co''' ,temp_file=_UpperCamelCase )
    with pytest.raises(_UpperCamelCase ):
        http_head('''https://huggingface.co''' )


@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_UpperCamelCase )
def a__ ( _UpperCamelCase : Optional[int] ):
    __lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(_UpperCamelCase ):
        ftp_get('''ftp://huggingface.co''' ,temp_file=_UpperCamelCase )
    with pytest.raises(_UpperCamelCase ):
        ftp_head('''ftp://huggingface.co''' )


@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_UpperCamelCase )
def a__ ( _UpperCamelCase : Dict ):
    __lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(_UpperCamelCase ):
        fsspec_get('''s3://huggingface.co''' ,temp_file=_UpperCamelCase )
    with pytest.raises(_UpperCamelCase ):
        fsspec_head('''s3://huggingface.co''' )
330
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
330
1
from __future__ import annotations

import os
from typing import Any

import requests


a_ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
a_ = BASE_URL + """/user"""
# https://github.com/settings/tokens
a_ = os.environ.get("""USER_TOKEN""", """""")


def a__ ( _UpperCamelCase : str ):
    __lowerCamelCase = {
        '''Authorization''': F"""token {auth_token}""",
        '''Accept''': '''application/vnd.github.v3+json''',
    }
    return requests.get(_UpperCamelCase ,headers=_UpperCamelCase ).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("""'USER_TOKEN' field cannot be empty.""")
330
def a__ ( _UpperCamelCase : int ):
    __lowerCamelCase = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
330
1
from __future__ import annotations


def a__ ( _UpperCamelCase : list[int] ):
    if not nums:
        return 0
    __lowerCamelCase = nums[0]
    __lowerCamelCase = 0
    for num in nums[1:]:
        __lowerCamelCase ,__lowerCamelCase = (
            max_excluding + num,
            max(_UpperCamelCase ,_UpperCamelCase ),
        )
    return max(_UpperCamelCase ,_UpperCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
330
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
330
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
330
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def a__ ( _UpperCamelCase : Optional[Any] ): __lowerCamelCase = 3_84 __lowerCamelCase = 7 if "tiny" in model_name: __lowerCamelCase = 96 __lowerCamelCase = (2, 2, 6, 2) __lowerCamelCase = (3, 6, 12, 24) elif "small" in model_name: __lowerCamelCase = 96 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (3, 6, 12, 24) elif "base" in model_name: __lowerCamelCase = 1_28 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (4, 8, 16, 32) __lowerCamelCase = 12 __lowerCamelCase = 5_12 elif "large" in model_name: __lowerCamelCase = 1_92 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (6, 12, 24, 48) __lowerCamelCase = 12 __lowerCamelCase = 7_68 # set label information __lowerCamelCase = 1_50 __lowerCamelCase = '''huggingface/label-files''' __lowerCamelCase = '''ade20k-id2label.json''' __lowerCamelCase = json.load(open(hf_hub_download(_UpperCamelCase ,_UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) ) __lowerCamelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = SwinConfig( embed_dim=_UpperCamelCase ,depths=_UpperCamelCase ,num_heads=_UpperCamelCase ,window_size=_UpperCamelCase ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,) __lowerCamelCase = UperNetConfig( backbone_config=_UpperCamelCase ,auxiliary_in_channels=_UpperCamelCase ,num_labels=_UpperCamelCase ,idalabel=_UpperCamelCase ,labelaid=_UpperCamelCase ,) return config def a__ ( _UpperCamelCase : List[Any] ): __lowerCamelCase = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", 
F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def a__ ( _UpperCamelCase : int ,_UpperCamelCase : List[str] ,_UpperCamelCase : Optional[Any] ): __lowerCamelCase = dct.pop(_UpperCamelCase ) __lowerCamelCase = val def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Dict ): __lowerCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowerCamelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowerCamelCase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) __lowerCamelCase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase = in_proj_weight[:dim, :] __lowerCamelCase = in_proj_bias[: dim] __lowerCamelCase = in_proj_weight[ dim : dim * 2, : ] __lowerCamelCase = in_proj_bias[ dim : dim * 2 ] __lowerCamelCase = in_proj_weight[ -dim :, : ] __lowerCamelCase = in_proj_bias[-dim :] # fmt: on def a__ ( _UpperCamelCase : Optional[Any] ): __lowerCamelCase ,__lowerCamelCase = x.shape __lowerCamelCase = x.reshape(_UpperCamelCase ,4 ,in_channel // 4 ) __lowerCamelCase = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_UpperCamelCase ,_UpperCamelCase ) return x def a__ ( _UpperCamelCase : Optional[Any] ): __lowerCamelCase ,__lowerCamelCase = x.shape __lowerCamelCase = x.reshape(_UpperCamelCase ,in_channel // 4 ,4 ) __lowerCamelCase = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_UpperCamelCase ,_UpperCamelCase ) return x def a__ ( _UpperCamelCase : Any ): __lowerCamelCase = x.shape[0] __lowerCamelCase = x.reshape(4 ,in_channel // 4 ) __lowerCamelCase = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_UpperCamelCase ) return x def a__ ( 
_UpperCamelCase : Tuple ): __lowerCamelCase = x.shape[0] __lowerCamelCase = x.reshape(in_channel // 4 ,4 ) __lowerCamelCase = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_UpperCamelCase ) return x def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : str ): __lowerCamelCase = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } __lowerCamelCase = model_name_to_url[model_name] __lowerCamelCase = torch.hub.load_state_dict_from_url(_UpperCamelCase ,map_location='''cpu''' ,file_name=_UpperCamelCase )[ '''state_dict''' ] for name, param in state_dict.items(): print(_UpperCamelCase ,param.shape ) __lowerCamelCase = get_upernet_config(_UpperCamelCase ) __lowerCamelCase = UperNetForSemanticSegmentation(_UpperCamelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(_UpperCamelCase ) if "bn" in key: __lowerCamelCase = key.replace('''bn''' ,'''batch_norm''' ) __lowerCamelCase = val # rename keys __lowerCamelCase = create_rename_keys(_UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) read_in_q_k_v(_UpperCamelCase ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: __lowerCamelCase = reverse_correct_unfold_reduction_order(_UpperCamelCase ) if "norm" in key: __lowerCamelCase = reverse_correct_unfold_norm_order(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) # verify on image __lowerCamelCase = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' __lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw ).convert('''RGB''' ) __lowerCamelCase = SegformerImageProcessor() __lowerCamelCase = processor(_UpperCamelCase ,return_tensors='''pt''' ).pixel_values with torch.no_grad(): __lowerCamelCase = model(_UpperCamelCase ) __lowerCamelCase = outputs.logits print(logits.shape ) print('''First values of logits:''' ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": __lowerCamelCase = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": __lowerCamelCase = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": __lowerCamelCase = torch.tensor( [[-6.5_851, -6.5_851, 
-6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": __lowerCamelCase = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print('''Logits:''' ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_UpperCamelCase ,atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_UpperCamelCase ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_UpperCamelCase ) if push_to_hub: print(F"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(F"""openmmlab/{model_name}""" ) processor.push_to_hub(F"""openmmlab/{model_name}""" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f"upernet-swin-{size}" for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) a_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
330
def a__ ( number : int ):
    # True when ``number`` is automorphic: its square ends in the same digits
    # as the number itself (e.g. 5 -> 25, 76 -> 5776).
    if not isinstance(number , int ):
        __lowerCamelCase = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(__lowerCamelCase )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
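# Independent spot-check of the same property tested above (this loop is a
# standalone demo, not part of the file): a number is automorphic when its
# square ends in the number's own digits, which the string comparison verifies.
for candidate in (5, 6, 7, 25, 76):
    square = candidate * candidate
    print(f"{candidate}^2 = {square} -> automorphic: {str(square).endswith(str(candidate))}")
# Expected output:
# 5^2 = 25 -> automorphic: True
# 6^2 = 36 -> automorphic: True
# 7^2 = 49 -> automorphic: False
# 25^2 = 625 -> automorphic: True
# 76^2 = 5776 -> automorphic: True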
330
1
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
330
1
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


a_ = logging.get_logger(__name__)


class __lowerCAmelCase ( YolosImageProcessor ):
    # Deprecated alias of ``YolosImageProcessor``; it only emits a warning and
    # defers everything else to the parent class.
    def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''simple docstring'''
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' , FutureWarning , )
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
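# A generic, self-contained sketch of the same deprecation-shim pattern shown
# above. ``NewProcessor`` / ``OldProcessor`` are illustrative names invented
# for this demo, not classes from the library.
import warnings


class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldProcessor(NewProcessor):
    """Deprecated alias kept only for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


if __name__ == "__main__":
    proc = OldProcessor(size=192)  # still works, but emits a FutureWarning at construction
    print(proc.size)  # -> 192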
330
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) a_ = logging.getLogger(__name__) def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : Tuple ): __lowerCamelCase = np.argmax(_UpperCamelCase ,axis=1 ) return np.sum(outputs == labels ) def a__ ( _UpperCamelCase : Optional[Any] ): with open(_UpperCamelCase ,encoding='''utf_8''' ) as f: __lowerCamelCase = csv.reader(_UpperCamelCase ) __lowerCamelCase = [] next(_UpperCamelCase ) # skip the first line for line in tqdm(_UpperCamelCase ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Tuple ,_UpperCamelCase : str ,_UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : Any ): __lowerCamelCase = [] for dataset in encoded_datasets: __lowerCamelCase = len(_UpperCamelCase ) __lowerCamelCase = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa ) __lowerCamelCase = np.zeros((n_batch, 2) ,dtype=np.intaa ) __lowerCamelCase = np.full((n_batch, 2, input_len) ,fill_value=-1_00 ,dtype=np.intaa ) __lowerCamelCase = np.zeros((n_batch,) ,dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(_UpperCamelCase ): __lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCamelCase = with_conta __lowerCamelCase = with_conta __lowerCamelCase = len(_UpperCamelCase ) - 1 __lowerCamelCase = len(_UpperCamelCase ) - 1 __lowerCamelCase = with_conta __lowerCamelCase = with_conta __lowerCamelCase = mc_label __lowerCamelCase = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(_UpperCamelCase ) for t in all_inputs ) ) return tensor_datasets def a__ ( ): __lowerCamelCase = argparse.ArgumentParser() parser.add_argument('''--model_name''' ,type=_UpperCamelCase ,default='''openai-gpt''' ,help='''pretrained model name''' ) parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' ,action='''store_true''' ,help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''The output directory where the model predictions and checkpoints will be written.''' ,) parser.add_argument('''--train_dataset''' ,type=_UpperCamelCase ,default='''''' ) parser.add_argument('''--eval_dataset''' ,type=_UpperCamelCase ,default='''''' ) parser.add_argument('''--seed''' ,type=_UpperCamelCase ,default=42 ) parser.add_argument('''--num_train_epochs''' ,type=_UpperCamelCase ,default=3 ) parser.add_argument('''--train_batch_size''' ,type=_UpperCamelCase ,default=8 ) parser.add_argument('''--eval_batch_size''' ,type=_UpperCamelCase ,default=16 ) parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=_UpperCamelCase ,help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' ,type=_UpperCamelCase ,default=1 ) 
parser.add_argument( '''--max_steps''' ,default=-1 ,type=_UpperCamelCase ,help=( '''If > 0: set total number of training steps to perform. Override num_train_epochs.''' ) ,) parser.add_argument( '''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,) parser.add_argument('''--learning_rate''' ,type=_UpperCamelCase ,default=6.25e-5 ) parser.add_argument('''--warmup_steps''' ,default=0 ,type=_UpperCamelCase ,help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' ,type=_UpperCamelCase ,default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' ,type=_UpperCamelCase ,default=0.01 ) parser.add_argument('''--lm_coef''' ,type=_UpperCamelCase ,default=0.9 ) parser.add_argument('''--n_valid''' ,type=_UpperCamelCase ,default=3_74 ) parser.add_argument('''--server_ip''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' ) __lowerCamelCase = parser.parse_args() print(_UpperCamelCase ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=_UpperCamelCase ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __lowerCamelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) __lowerCamelCase = torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase ,_UpperCamelCase ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __lowerCamelCase = ['''_start_''', '''_delimiter_''', '''_classify_'''] __lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(_UpperCamelCase ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) __lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(_UpperCamelCase ) ) model.to(_UpperCamelCase ) # Load and encode the datasets def tokenize_and_encode(_UpperCamelCase : Tuple ): if isinstance(_UpperCamelCase ,_UpperCamelCase ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) ) elif isinstance(_UpperCamelCase ,_UpperCamelCase ): return obj return [tokenize_and_encode(_UpperCamelCase ) for o in obj] logger.info('''Encoding dataset...''' ) __lowerCamelCase = load_rocstories_dataset(args.train_dataset ) __lowerCamelCase = load_rocstories_dataset(args.eval_dataset ) __lowerCamelCase = (train_dataset, eval_dataset) __lowerCamelCase = tokenize_and_encode(_UpperCamelCase ) # Compute the max input length for the Transformer __lowerCamelCase = model.config.n_positions // 2 - 2 __lowerCamelCase = max( len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __lowerCamelCase = 
min(_UpperCamelCase ,model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __lowerCamelCase = pre_process_datasets(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,*_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = tensor_datasets[0], tensor_datasets[1] __lowerCamelCase = TensorDataset(*_UpperCamelCase ) __lowerCamelCase = RandomSampler(_UpperCamelCase ) __lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.train_batch_size ) __lowerCamelCase = TensorDataset(*_UpperCamelCase ) __lowerCamelCase = SequentialSampler(_UpperCamelCase ) __lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __lowerCamelCase = args.max_steps __lowerCamelCase = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1 else: __lowerCamelCase = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs __lowerCamelCase = list(model.named_parameters() ) __lowerCamelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] __lowerCamelCase = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] __lowerCamelCase = AdamW(_UpperCamelCase ,lr=args.learning_rate ,eps=args.adam_epsilon ) __lowerCamelCase = get_linear_schedule_with_warmup( _UpperCamelCase ,num_warmup_steps=args.warmup_steps ,num_training_steps=_UpperCamelCase ) if args.do_train: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) ,desc='''Epoch''' ): __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = tqdm(_UpperCamelCase ,desc='''Training''' ) for step, batch in enumerate(_UpperCamelCase ): __lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch __lowerCamelCase = model(_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase ) __lowerCamelCase = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __lowerCamelCase = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __lowerCamelCase = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase ,scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __lowerCamelCase = model.module if hasattr(_UpperCamelCase ,'''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase ) __lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase ) torch.save(model_to_save.state_dict() ,_UpperCamelCase ) model_to_save.config.to_json_file(_UpperCamelCase ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned __lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(_UpperCamelCase ) if args.do_eval: model.eval() __lowerCamelCase ,__lowerCamelCase = 0, 0 __lowerCamelCase 
,__lowerCamelCase = 0, 0 for batch in tqdm(_UpperCamelCase ,desc='''Evaluating''' ): __lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch with torch.no_grad(): __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = model( _UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase ) __lowerCamelCase = mc_logits.detach().cpu().numpy() __lowerCamelCase = mc_labels.to('''cpu''' ).numpy() __lowerCamelCase = accuracy(_UpperCamelCase ,_UpperCamelCase ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __lowerCamelCase = eval_loss / nb_eval_steps __lowerCamelCase = eval_accuracy / nb_eval_examples __lowerCamelCase = tr_loss / nb_tr_steps if args.do_train else None __lowerCamelCase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} __lowerCamelCase = os.path.join(args.output_dir ,'''eval_results.txt''' ) with open(_UpperCamelCase ,'''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' ,_UpperCamelCase ,str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
330
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
330
1
from __future__ import annotations

import os
from collections.abc import Mapping

a_ = tuple[int, int]


class __lowerCAmelCase :
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = vertices
        __lowerCamelCase = {
            (min(__UpperCAmelCase ), max(__UpperCAmelCase )): weight for edge, weight in edges.items()
        }

    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        __lowerCamelCase = weight

    def lowerCamelCase ( self ):
        '''simple docstring'''
        __lowerCamelCase = Graph({min(self.vertices )} , {} )
        __lowerCamelCase = 42
        __lowerCamelCase = 42
        __lowerCamelCase = 42
        __lowerCamelCase = 42
        while len(subgraph.vertices ) < len(self.vertices ):
            __lowerCamelCase = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        __lowerCamelCase = edge
                        __lowerCamelCase = weight
            subgraph.add_edge(__UpperCAmelCase , __UpperCAmelCase )
        return subgraph


def a__ ( _UpperCamelCase : str = "p107_network.txt" ):
    __lowerCamelCase = os.path.abspath(os.path.dirname(_UpperCamelCase ) )
    __lowerCamelCase = os.path.join(_UpperCamelCase ,_UpperCamelCase )
    __lowerCamelCase = {}
    __lowerCamelCase = 42
    __lowerCamelCase = 42
    __lowerCamelCase = 42
    with open(_UpperCamelCase ) as f:
        __lowerCamelCase = f.read().strip().split('''\n''' )
        __lowerCamelCase = [line.split(''',''' ) for line in data]
    for edgea in range(1 ,len(_UpperCamelCase ) ):
        for edgea in range(_UpperCamelCase ):
            if adjaceny_matrix[edgea][edgea] != "-":
                __lowerCamelCase = int(adjaceny_matrix[edgea][edgea] )
    __lowerCamelCase = Graph(set(range(len(_UpperCamelCase ) ) ) ,_UpperCamelCase )
    __lowerCamelCase = graph.prims_algorithm()
    __lowerCamelCase = sum(graph.edges.values() )
    __lowerCamelCase = sum(subgraph.edges.values() )
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
330
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
330
1
from collections import defaultdict


class __lowerCAmelCase :
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        __lowerCamelCase = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(__UpperCAmelCase ) )
        ]

        __lowerCamelCase = defaultdict(__UpperCAmelCase )  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        __lowerCamelCase = (1 << len(__UpperCAmelCase )) - 1

    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        # if mask == self.finalmask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't take this task in the arrangement
        __lowerCamelCase = self.count_ways_until(__UpperCAmelCase , task_no + 1 )

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )

        # save the value.
        __lowerCamelCase = total_ways_util

        return self.dp[mask][task_no]

    def lowerCamelCase ( self , __UpperCAmelCase ):
        '''simple docstring'''
        # Store the list of persons for each task
        for i in range(len(__UpperCAmelCase ) ):
            for j in task_performed[i]:
                self.task[j].append(__UpperCAmelCase )

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )


if __name__ == "__main__":
    a_ = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    a_ = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
330
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
330
1
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset a_ = """bert-base-cased""" a_ = """google/pegasus-xsum""" a_ = [""" Sam ate lunch today.""", """Sams lunch ingredients."""] a_ = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""] a_ = """patrickvonplaten/t5-tiny-random""" a_ = """sshleifer/bart-tiny-random""" a_ = """sshleifer/tiny-mbart""" a_ = """sshleifer/tiny-marian-en-de""" def a__ ( _UpperCamelCase : Path ,_UpperCamelCase : list ): __lowerCamelCase = '''\n'''.join(_UpperCamelCase ) Path(_UpperCamelCase ).open('''w''' ).writelines(_UpperCamelCase ) def a__ ( _UpperCamelCase : Tuple ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(_UpperCamelCase ,F"""{split}.source""" ) ,_UpperCamelCase ) _dump_articles(os.path.join(_UpperCamelCase ,F"""{split}.target""" ) ,_UpperCamelCase ) return tmp_dir class __lowerCAmelCase ( lowerCAmelCase__ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase ) __lowerCamelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __lowerCamelCase = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES ) __lowerCamelCase = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES ) __lowerCamelCase = 4 __lowerCamelCase = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated __lowerCamelCase ,__lowerCamelCase = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. __lowerCamelCase = SeqaSeqDataset( __UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='''train''' , max_source_length=__UpperCAmelCase , max_target_length=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , ) __lowerCamelCase = DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place __lowerCamelCase = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase ) __lowerCamelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __lowerCamelCase = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES ) __lowerCamelCase = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES ) __lowerCamelCase = 4 __lowerCamelCase = LegacySeqaSeqDataset( __UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='''train''' , max_source_length=20 , max_target_length=__UpperCAmelCase , ) __lowerCamelCase = DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) __lowerCamelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) __lowerCamelCase = tmp_dir.joinpath('''train.source''' ).open().readlines() __lowerCamelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(__UpperCAmelCase , __UpperCAmelCase , 128 , __UpperCAmelCase ) __lowerCamelCase = {x.name for x in tmp_dir.iterdir()} __lowerCamelCase = {x.name for x in save_dir.iterdir()} __lowerCamelCase = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(__UpperCAmelCase ) < len(__UpperCAmelCase ) assert len(__UpperCAmelCase ) == 1 assert len(packed_examples[0] ) == sum(len(__UpperCAmelCase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def lowerCamelCase ( self ): '''simple docstring''' if not FAIRSEQ_AVAILABLE: return __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = self._get_dataset(max_len=64 ) __lowerCamelCase = 64 __lowerCamelCase = ds.make_dynamic_sampler(__UpperCAmelCase , required_batch_size_multiple=__UpperCAmelCase ) __lowerCamelCase = [len(__UpperCAmelCase ) for x in batch_sampler] assert len(set(__UpperCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(__UpperCAmelCase ) == len(__UpperCAmelCase ) # no dropped or added examples __lowerCamelCase = DataLoader(__UpperCAmelCase , batch_sampler=__UpperCAmelCase , collate_fn=ds.collate_fn , 
num_workers=2 ) __lowerCamelCase = [] __lowerCamelCase = [] for batch in data_loader: __lowerCamelCase = batch['''input_ids'''].shape __lowerCamelCase = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple __lowerCamelCase = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(__UpperCAmelCase ) if num_src_tokens > (max_tokens * 1.1): failures.append(__UpperCAmelCase ) assert num_src_per_batch[0] == max(__UpperCAmelCase ) if failures: raise AssertionError(F"""too many tokens in {len(__UpperCAmelCase )} batches""" ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = self._get_dataset(max_len=512 ) __lowerCamelCase = 2 __lowerCamelCase = ds.make_sortish_sampler(__UpperCAmelCase , shuffle=__UpperCAmelCase ) __lowerCamelCase = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 ) __lowerCamelCase = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__UpperCAmelCase ) __lowerCamelCase = tokenizer.pad_token_id def count_pad_tokens(__UpperCAmelCase , __UpperCAmelCase="input_ids" ): return [batch[k].eq(__UpperCAmelCase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(__UpperCAmelCase , k='''labels''' ) ) < sum(count_pad_tokens(__UpperCAmelCase , k='''labels''' ) ) assert sum(count_pad_tokens(__UpperCAmelCase ) ) < sum(count_pad_tokens(__UpperCAmelCase ) ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase=1000 , __UpperCAmelCase=128 ): '''simple docstring''' if os.getenv('''USE_REAL_DATA''' , __UpperCAmelCase ): __lowerCamelCase = '''examples/seq2seq/wmt_en_ro''' __lowerCamelCase = max_len * 2 * 64 if not Path(__UpperCAmelCase ).joinpath('''train.len''' ).exists(): save_len_file(__UpperCAmelCase , __UpperCAmelCase ) else: __lowerCamelCase = '''examples/seq2seq/test_data/wmt_en_ro''' __lowerCamelCase = max_len * 4 save_len_file(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase ) __lowerCamelCase = SeqaSeqDataset( __UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='''train''' , max_source_length=__UpperCAmelCase , max_target_length=__UpperCAmelCase , n_obs=__UpperCAmelCase , ) return ds, max_tokens, tokenizer def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = self._get_dataset() __lowerCamelCase = set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__UpperCAmelCase ) ) __lowerCamelCase = set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__UpperCAmelCase ) ) assert idsa.intersection(__UpperCAmelCase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase , use_fast=__UpperCAmelCase ) if tok_name == MBART_TINY: __lowerCamelCase = SeqaSeqDataset( __UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) __lowerCamelCase = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: __lowerCamelCase = SeqaSeqDataset( __UpperCAmelCase , 
data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) __lowerCamelCase = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(__UpperCAmelCase ) == 1 if tok_name == BART_TINY else len(__UpperCAmelCase ) == 0
330
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
330
1
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel UpperCAmelCase__ = HfApi() UpperCAmelCase__ = {} # fmt: off UpperCAmelCase__ = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) UpperCAmelCase__ = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) UpperCAmelCase__ = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) UpperCAmelCase__ = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) UpperCAmelCase__ = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) UpperCAmelCase__ = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) UpperCAmelCase__ = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) UpperCAmelCase__ = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) UpperCAmelCase__ = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) UpperCAmelCase__ = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) UpperCAmelCase__ = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) UpperCAmelCase__ = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 
1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) UpperCAmelCase__ = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) UpperCAmelCase__ = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) UpperCAmelCase__ = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on UpperCAmelCase__ = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": UpperCAmelCase__ = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): UpperCAmelCase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: UpperCAmelCase__ = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) UpperCAmelCase__ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) UpperCAmelCase__ = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): UpperCAmelCase__ = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
0
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
330
0
'''simple docstring'''
from datetime import datetime as dt
import os

from github import Github

SCREAMING_SNAKE_CASE_: Optional[Any] =[
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]


def lowerCAmelCase_ ( ) -> Union[str, Any]:
    '''simple docstring'''
    UpperCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] )
    UpperCAmelCase_ = g.get_repo("huggingface/transformers" )
    UpperCAmelCase_ = repo.get_issues(state="open" )

    for issue in open_issues:
        UpperCAmelCase_ = sorted([comment for comment in issue.get_comments()] , key=lambda snake_case_ : i.created_at , reverse=snake_case_ )
        UpperCAmelCase_ = comments[0] if len(snake_case_ ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed" )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    lowerCAmelCase_()
1
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
0
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) lowerCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : str lowerCAmelCase__ : str lowerCAmelCase__ : Optional[str] = None lowerCAmelCase__ : Optional[str] = None lowerCAmelCase__ : Optional[str] = None @dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : List[int] lowerCAmelCase__ : Optional[List[int]] = None lowerCAmelCase__ : Optional[List[int]] = None lowerCAmelCase__ : Optional[Union[int, float]] = None lowerCAmelCase__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[InputFeatures] def __init__(self : Optional[int] , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = None , UpperCamelCase : List[Any]=False , UpperCamelCase : bool = False , ): '''simple docstring''' lowercase__ = hans_processors[task]() lowercase__ = os.path.join( UpperCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(UpperCamelCase ) , UpperCamelCase , ) , ) lowercase__ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowercase__ ,lowercase__ = label_list[2], label_list[1] lowercase__ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowercase__ = cached_features_file + '''.lock''' with FileLock(UpperCamelCase ): if os.path.exists(UpperCamelCase ) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}" ) lowercase__ = torch.load(UpperCamelCase ) else: logger.info(f"Creating features from dataset file at {data_dir}" ) lowercase__ = ( processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase ) ) logger.info('''Training examples: %s''' , len(UpperCamelCase ) ) lowercase__ = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) logger.info('''Saving features into cached file %s''' , UpperCamelCase ) torch.save(self.features , UpperCamelCase ) def __len__(self : Optional[int] ): '''simple docstring''' return len(self.features ) def __getitem__(self : Tuple , UpperCamelCase : str ): '''simple docstring''' return self.features[i] def UpperCamelCase__ (self : str ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : List[InputFeatures] def __init__(self : List[Any] , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = 128 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : bool = False , ): '''simple docstring''' lowercase__ = hans_processors[task]() lowercase__ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowercase__ ,lowercase__ = label_list[2], label_list[1] lowercase__ = label_list lowercase__ = processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase ) lowercase__ = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(UpperCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) lowercase__ = tf.data.Dataset.from_generator( UpperCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return self.dataset def __len__(self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__(self : Union[str, Any] , UpperCamelCase : Tuple ): '''simple docstring''' return self.features[i] def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.label_list class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Any ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[Any] ): '''simple docstring''' return 
self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = [] for i, line in enumerate(UpperCamelCase ): if i == 0: continue lowercase__ = '''%s-%s''' % (set_type, line[0]) lowercase__ = line[5] lowercase__ = line[6] lowercase__ = line[7][2:] if line[7].startswith('''ex''' ) else line[7] lowercase__ = line[0] examples.append(InputExample(guid=UpperCamelCase , text_a=UpperCamelCase , text_b=UpperCamelCase , label=UpperCamelCase , pairID=UpperCamelCase ) ) return examples def _SCREAMING_SNAKE_CASE (A , A , A , A , ) -> int: """simple docstring""" lowercase__ = {label: i for i, label in enumerate(A )} lowercase__ = [] for ex_index, example in tqdm.tqdm(enumerate(A ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) lowercase__ = tokenizer( example.text_a , example.text_b , add_special_tokens=A , max_length=A , padding='''max_length''' , truncation=A , return_overflowing_tokens=A , ) lowercase__ = label_map[example.label] if example.label in label_map else 0 lowercase__ = int(example.pairID ) features.append(InputFeatures(**A , label=A , pairID=A ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"guid: {example}" ) logger.info(f"features: {features[i]}" ) return features lowerCamelCase : Tuple = { 'hans': 3, } lowerCamelCase : str = { 'hans': HansProcessor, }
2
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

a_ = logging.get_logger(__name__)


@add_end_docstrings(lowerCAmelCase__ )
class __lowerCAmelCase ( lowerCAmelCase__ ):
    def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''simple docstring'''
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
        requires_backends(self , '''vision''' )
        self.check_model_type(__UpperCAmelCase )

    def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ):
        '''simple docstring'''
        return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )

    def lowerCamelCase ( self , **__UpperCAmelCase ):
        '''simple docstring'''
        return {}, {}, {}

    def lowerCamelCase ( self , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = load_image(__UpperCAmelCase )
        __lowerCamelCase = image.size
        __lowerCamelCase = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework )
        return model_inputs

    def lowerCamelCase ( self , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = self.model(**__UpperCAmelCase )
        return model_outputs

    def lowerCamelCase ( self , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = model_outputs.predicted_depth
        __lowerCamelCase = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__UpperCAmelCase )
        __lowerCamelCase = prediction.squeeze().cpu().numpy()
        __lowerCamelCase = (output * 255 / np.max(__UpperCAmelCase )).astype('''uint8''' )
        __lowerCamelCase = Image.fromarray(__UpperCAmelCase )
        __lowerCamelCase = {}
        __lowerCamelCase = predicted_depth
        __lowerCamelCase = depth
        return output_dict
330
0
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , 
'''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
0
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : str = (KDPMaDiscreteScheduler,) lowerCamelCase : Optional[int] = 10 def __UpperCAmelCase ( self : List[Any] , **UpperCAmelCase__ : Optional[int] ) -> Any: lowerCAmelCase = { 'num_train_timesteps': 1_1_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**UpperCAmelCase__ ) return config def __UpperCAmelCase ( self : List[str] ) -> Tuple: for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Optional[Any] ) -> int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase__ ) def __UpperCAmelCase ( self : int ) -> Dict: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2 assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0_002 ) < 1E-3 def __UpperCAmelCase ( self : Tuple ) -> Dict: if torch_device == "mps": return lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: if 
torch_device == "mps": return lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) if str(UpperCAmelCase__ ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3
4
from __future__ import annotations from typing import Generic, TypeVar a_ = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = data __lowerCamelCase = self __lowerCamelCase = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # map from node name to the node object __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # create a new set with x as its member __lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # find the set x belongs to (with path-compression) __lowerCamelCase = self.map[data] if elem_ref != elem_ref.parent: __lowerCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __lowerCamelCase = nodea else: __lowerCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # add a node ONLY if its not present in the graph if node not in self.connections: __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # add an edge with the given weight self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) __lowerCamelCase = weight __lowerCamelCase = weight def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __UpperCAmelCase : x[2] ) # creating the disjoint set __lowerCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__UpperCAmelCase ) # MST generation __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index] index += 1 __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase ) return graph
330
0
# flake8: noqa
# Lint as: python3
UpperCAmelCase__ = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
5
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: A : List[str] = None A : List[str] = logging.get_logger(__name__) A : int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} A : Optional[Any] = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } A : List[str] = { 'facebook/mbart-large-en-ro': 1_0_2_4, 'facebook/mbart-large-cc25': 1_0_2_4, } # fmt: off A : Optional[int] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = ['''input_ids''', '''attention_mask'''] snake_case_ = MBartTokenizer snake_case_ = [] snake_case_ = [] def __init__( self , _snake_case=None , _snake_case=None , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case , ) -> Tuple: '''simple docstring''' __a = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( vocab_file=_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , **_snake_case , ) __a = vocab_file __a = False if not self.vocab_file else True __a = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __a = { lang_code: self.convert_tokens_to_ids(_snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __a = src_lang if src_lang is not None else '''en_XX''' __a = self.convert_tokens_to_ids(self._src_lang ) __a = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None: '''simple docstring''' __a = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]: '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case ) -> Any: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __a = src_lang __a = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case ) __a = self.convert_tokens_to_ids(_snake_case ) __a = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = "en_XX" , _snake_case = None , _snake_case = "ro_RO" , **_snake_case , ) -> BatchEncoding: '''simple docstring''' __a = src_lang __a = tgt_lang return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None: '''simple docstring''' __a = self.convert_tokens_to_ids(_snake_case ) __a = [] __a = [self.eos_token_id, self.cur_lang_code] __a = self.convert_ids_to_tokens(self.prefix_tokens ) __a = self.convert_ids_to_tokens(self.suffix_tokens ) __a = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None: '''simple docstring''' __a = self.convert_tokens_to_ids(_snake_case ) __a = [] __a = [self.eos_token_id, self.cur_lang_code] __a = self.convert_ids_to_tokens(self.prefix_tokens ) __a = self.convert_ids_to_tokens(self.suffix_tokens ) __a = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE_ ( self 
, _snake_case , _snake_case = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_snake_case ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return __a = os.path.join( _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
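# Minimal usage sketch for the fast MBart tokenizer defined above, assuming it matches
# transformers' MBartTokenizerFast and that the facebook/mbart-large-en-ro checkpoint is
# available; both names are assumptions, not taken from this file.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# As in set_src_lang_special_tokens above, source sequences take no prefix token and end
# with [</s>, src_lang_code].
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])[-2:])  # ['</s>', 'en_XX']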
6
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all ``files`` into a single file.

    Greedy strategy: repeatedly merge the two cheapest files and add the merged
    size back into the pool.
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
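# A short worked example of the greedy merge cost above (illustrative, not part of the
# original file): merging [2, 3, 4] first combines 2 + 3 = 5 (cost 5), then 5 + 4 = 9
# (cost 9), for a total cost of 14.
print(optimal_merge_pattern([2, 3, 4]))  # 14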
7
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first character of ``sentence`` if it is a lowercase letter."""
    if not sentence:
        return ""
    # Map every lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
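# Illustrative calls for the helper above (not part of the original file):
print(capitalize("hello world"))  # "Hello world"
print(capitalize("123 hello"))    # "123 hello" -- a non-letter first character is left as-is
print(capitalize(""))             # "" -- empty input stays empty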
330
0
import operator as op lowerCAmelCase_ = '''scaler.pt''' lowerCAmelCase_ = '''pytorch_model''' lowerCAmelCase_ = '''random_states''' lowerCAmelCase_ = '''optimizer''' lowerCAmelCase_ = '''scheduler''' lowerCAmelCase_ = '''pytorch_model.bin''' lowerCAmelCase_ = '''pytorch_model.bin.index.json''' lowerCAmelCase_ = '''model.safetensors''' lowerCAmelCase_ = '''model.safetensors.index.json''' lowerCAmelCase_ = '''1.10.2''' lowerCAmelCase_ = '''py38''' lowerCAmelCase_ = '''4.17.0''' lowerCAmelCase_ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge'''] lowerCAmelCase_ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2'''] lowerCAmelCase_ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP'''] lowerCAmelCase_ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH'''] lowerCAmelCase_ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT'''] lowerCAmelCase_ = '''2.0.1''' lowerCAmelCase_ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich'''] lowerCAmelCase_ = ['''default''', '''reduce-overhead''', '''max-autotune'''] lowerCAmelCase_ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowerCAmelCase_ = [ '''nnodes''', '''nproc_per_node''', '''rdzv_backend''', '''rdzv_endpoint''', '''rdzv_id''', '''rdzv_conf''', '''standalone''', '''max_restarts''', '''monitor_interval''', '''start_method''', '''role''', '''module''', '''m''', '''no_python''', '''run_path''', '''log_dir''', '''r''', '''redirects''', '''t''', '''tee''', '''node_rank''', '''master_addr''', '''master_port''', ] lowerCAmelCase_ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM'''] lowerCAmelCase_ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
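# Sketch of how the operator-string map defined above is typically used for version
# gating. The helper below is illustrative only; it is not defined in the constants file.
import operator as op

from packaging.version import parse

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}


def compare_versions(current: str, operation: str, required: str) -> bool:
    """Return True if `current` `operation` `required` holds, e.g. ("2.0.1", ">=", "1.10.2")."""
    return STR_OPERATION_TO_FUNC[operation](parse(current), parse(required))


print(compare_versions("2.0.1", ">=", "1.10.2"))  # True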
8
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
330
0
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCAmelCase : Dict =logging.get_logger(__name__) class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''audio_values''', '''audio_mask'''] def __init__( self :Optional[Any] , lowerCAmelCase__ :str=2_048 , lowerCAmelCase__ :str=1 , lowerCAmelCase__ :List[Any]=[16, 16] , lowerCAmelCase__ :List[str]=128 , lowerCAmelCase__ :Dict=44_100 , lowerCAmelCase__ :Tuple=86 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Union[str, Any]=0.0 , **lowerCAmelCase__ :int , ) -> str: super().__init__( feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = spectrogram_length __SCREAMING_SNAKE_CASE : Dict = num_channels __SCREAMING_SNAKE_CASE : List[Any] = patch_size __SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE : Optional[Any] = n_fft __SCREAMING_SNAKE_CASE : int = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE : Optional[int] = sampling_rate __SCREAMING_SNAKE_CASE : Tuple = padding_value __SCREAMING_SNAKE_CASE : Dict = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase__ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase__ , norm='''slaney''' , mel_scale='''slaney''' , ).T def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :np.array ) -> np.ndarray: __SCREAMING_SNAKE_CASE : Union[str, Any] = spectrogram( lowerCAmelCase__ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE : Tuple = log_spec[:, :-1] __SCREAMING_SNAKE_CASE : Tuple = log_spec - 20.0 __SCREAMING_SNAKE_CASE : List[str] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self :Dict , lowerCAmelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Optional[bool] = True , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , **lowerCAmelCase__ :Union[str, Any] , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' f''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE : Any = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __SCREAMING_SNAKE_CASE : str = is_batched_numpy or ( isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ): __SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa ) elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE : str = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE : Optional[Any] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE : Dict = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE : Optional[int] = np.array(lowerCAmelCase__ ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE : List[str] = np.ones([len(lowerCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE : Union[str, Any] = padded_audio_features * self.padding_value for i in range(len(lowerCAmelCase__ ) ): __SCREAMING_SNAKE_CASE : Dict = audio_features[i] __SCREAMING_SNAKE_CASE : str = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE : Dict = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE : List[str] = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE : Any = BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ ) return encoded_inputs
9
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
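# Standalone sketch of the resume helper exercised in the tests above: skip_first_batches
# drops the first N batches of an existing dataloader (mirroring the expected values in
# the test a few lines up). Illustrative only, not part of the test module.
from torch.utils.data import DataLoader
from accelerate.data_loader import skip_first_batches

dataloader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(dataloader, num_batches=2)
print([batch.tolist() for batch in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]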
330
0
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors lowerCAmelCase__ = logging.getLogger(__name__) class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "sequence-classification" def __init__( self , __lowerCamelCase) -> List[Any]: if type(__lowerCamelCase) == dict: _A : Tuple = Namespace(**__lowerCamelCase) _A : Tuple = glue_output_modes[hparams.task] _A : Union[str, Any] = glue_tasks_num_labels[hparams.task] super().__init__(__lowerCamelCase , __lowerCamelCase , self.mode) def _lowerCamelCase ( self , **__lowerCamelCase) -> List[str]: return self.model(**__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> str: _A : List[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A : Union[str, Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None _A : Union[str, Any] = self(**__lowerCamelCase) _A : str = outputs[0] _A : Optional[Any] = self.trainer.lr_schedulers[0]["scheduler"] _A : Tuple = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowerCamelCase ( self) -> Dict: _A : Tuple = self.hparams _A : Dict = processors[args.task]() _A : str = processor.get_labels() for mode in ["train", "dev"]: _A : Optional[int] = self._feature_file(__lowerCamelCase) if os.path.exists(__lowerCamelCase) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , __lowerCamelCase) else: logger.info("Creating features from dataset file at %s" , args.data_dir) _A : Any = ( processor.get_dev_examples(args.data_dir) if mode == "dev" else processor.get_train_examples(args.data_dir) ) _A : Union[str, Any] = convert_examples_to_features( __lowerCamelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , __lowerCamelCase) torch.save(__lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False) -> DataLoader: _A : Dict = "dev" if mode == "test" else mode _A : Optional[int] = self._feature_file(__lowerCamelCase) logger.info("Loading features from cached file %s" , __lowerCamelCase) _A : str = torch.load(__lowerCamelCase) _A : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long) _A : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long) _A : Tuple = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long) if self.hparams.glue_output_mode == "classification": _A : Optional[Any] = torch.tensor([f.label for f in features] , dtype=torch.long) elif self.hparams.glue_output_mode == "regression": _A : int = torch.tensor([f.label for f in features] , dtype=torch.float) return DataLoader( TensorDataset(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , ) def _lowerCamelCase ( self , 
__lowerCamelCase , __lowerCamelCase) -> Any: _A : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _A : Union[str, Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None _A : Union[str, Any] = self(**__lowerCamelCase) _A , _A : Union[str, Any] = outputs[:2] _A : Any = logits.detach().cpu().numpy() _A : int = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowerCamelCase ( self , __lowerCamelCase) -> tuple: _A : int = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item() _A : Optional[Any] = np.concatenate([x["pred"] for x in outputs] , axis=0) if self.hparams.glue_output_mode == "classification": _A : int = np.argmax(__lowerCamelCase , axis=1) elif self.hparams.glue_output_mode == "regression": _A : Tuple = np.squeeze(__lowerCamelCase) _A : Any = np.concatenate([x["target"] for x in outputs] , axis=0) _A : Optional[Any] = [[] for _ in range(out_label_ids.shape[0])] _A : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0])] _A : str = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCamelCase , __lowerCamelCase)} _A : List[Any] = dict(results.items()) _A : str = results return ret, preds_list, out_label_list def _lowerCamelCase ( self , __lowerCamelCase) -> dict: _A , _A , _A : Tuple = self._eval_end(__lowerCamelCase) _A : List[Any] = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowerCamelCase ( self , __lowerCamelCase) -> dict: _A , _A , _A : int = self._eval_end(__lowerCamelCase) _A : Any = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase) -> List[str]: BaseTransformer.add_model_specific_args(__lowerCamelCase , __lowerCamelCase) parser.add_argument( "--max_seq_length" , default=1_2_8 , type=__lowerCamelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--task" , default="" , type=__lowerCamelCase , required=__lowerCamelCase , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=__lowerCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets") return parser def _UpperCAmelCase (): _A : Optional[Any] = argparse.ArgumentParser() add_generic_args(UpperCamelCase__ , os.getcwd() ) _A : Optional[int] = GLUETransformer.add_model_specific_args(UpperCamelCase__ , os.getcwd() ) _A : List[str] = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _A : Any = os.path.join( "./results" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) _A : Union[str, Any] = GLUETransformer(UpperCamelCase__ ) _A : Optional[int] = generic_train(UpperCamelCase__ , UpperCamelCase__ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _A : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=UpperCamelCase__ ) ) _A : Dict = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(UpperCamelCase__ ) if __name__ == "__main__": main()
11
def perfect_cube(n: int) -> bool:
    """Check whether ``n`` is a perfect cube using its floating-point cube root.

    Note: floating-point rounding can make this check unreliable for very large values.
    """
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
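# Illustrative variant (not part of the original file): rounding the cube root to the
# nearest integer sidesteps most floating-point false negatives of the check above.
def perfect_cube_rounded(n: int) -> bool:
    root = round(n ** (1 / 3))
    return root * root * root == n


print(perfect_cube_rounded(27))  # True
print(perfect_cube_rounded(4))   # False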
330
0
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration UpperCAmelCase_ = 50_000 UpperCAmelCase_ = 5_000 UpperCAmelCase_ , UpperCAmelCase_ = os.path.split(__file__) UpperCAmelCase_ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json')) @get_duration def lowerCamelCase__ ( A__ : datasets.Dataset , A__ : Optional[int] ): '''simple docstring''' for i in range(A__ ): __lowerCamelCase = dataset[i] @get_duration def lowerCamelCase__ ( A__ : datasets.Dataset , A__ : Union[str, Any] , A__ : str ): '''simple docstring''' for i in range(0 , len(A__ ) , A__ ): __lowerCamelCase = dataset[i : i + batch_size] @get_duration def lowerCamelCase__ ( A__ : datasets.Dataset , A__ : Optional[int] , A__ : Union[str, Any] ): '''simple docstring''' with dataset.formatted_as(type=A__ ): for i in range(A__ ): __lowerCamelCase = dataset[i] @get_duration def lowerCamelCase__ ( A__ : datasets.Dataset , A__ : int , A__ : Optional[int] , A__ : Any ): '''simple docstring''' with dataset.formatted_as(type=A__ ): for i in range(0 , A__ , A__ ): __lowerCamelCase = dataset[i : i + batch_size] def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = {"""num examples""": SPEED_TEST_N_EXAMPLES} __lowerCamelCase = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}), ] __lowerCamelCase = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("""generating dataset""" ) __lowerCamelCase = datasets.Features( {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} ) __lowerCamelCase = generate_example_dataset( os.path.join(A__ , """dataset.arrow""" ) , A__ , num_examples=A__ , seq_shapes={"""list""": (100,)} , ) print("""first set of iterations""" ) for func, kwargs in functions: print(func.__name__ , str(A__ ) ) __lowerCamelCase = func(A__ , **A__ ) print("""shuffling dataset""" ) __lowerCamelCase = dataset.shuffle() print("""Second set of iterations (after shuffling""" ) for func, kwargs in functions_shuffled: print("""shuffled """ , func.__name__ , str(A__ ) ) __lowerCamelCase = func( A__ , **A__ ) with open(A__ , """wb""" ) as f: f.write(json.dumps(A__ 
).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
12
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
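# Standalone sketch of the decorator showcased in the script above: it calls the wrapped
# function with `starting_batch_size` and halves the batch size each time an out-of-memory
# error escapes, until the call succeeds. The simulated OOM below is purely illustrative.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def fake_training_loop(batch_size):
    if batch_size > 32:
        # Pretend that batch sizes above 32 do not fit in memory
        raise RuntimeError("CUDA out of memory.")
    return f"trained with batch_size={batch_size}"


print(fake_training_loop())  # trained with batch_size=32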
330
0
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = image.size SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 SCREAMING_SNAKE_CASE_: Tuple = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) SCREAMING_SNAKE_CASE_: str = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_5_5.0 SCREAMING_SNAKE_CASE_: Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE_: str = torch.from_numpy(_UpperCAmelCase ) return 2.0 * image - 1.0 class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : VQModel , lowerCAmelCase__ : UNetaDModel , lowerCAmelCase__ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__) @torch.no_grad() def __call__( self : str , lowerCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 100 , lowerCAmelCase__ : Optional[float] = 0.0 , lowerCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , ): if isinstance(lowerCAmelCase__ , PIL.Image.Image): SCREAMING_SNAKE_CASE_: List[str] = 1 elif isinstance(lowerCAmelCase__ , torch.Tensor): SCREAMING_SNAKE_CASE_: str = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase__)}") if isinstance(lowerCAmelCase__ , PIL.Image.Image): SCREAMING_SNAKE_CASE_: Dict = preprocess(lowerCAmelCase__) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image SCREAMING_SNAKE_CASE_: List[str] = (batch_size, self.unet.config.in_channels // 2, height, width) SCREAMING_SNAKE_CASE_: List[str] = next(self.unet.parameters()).dtype SCREAMING_SNAKE_CASE_: Dict = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = image.to(device=self.device , dtype=lowerCAmelCase__) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowerCAmelCase__ , device=self.device) SCREAMING_SNAKE_CASE_: List[str] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE_: List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE_: List[str] = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) SCREAMING_SNAKE_CASE_: Optional[Any] = {} if accepts_eta: SCREAMING_SNAKE_CASE_: int = eta for t in self.progress_bar(lowerCAmelCase__): # concat latents and low resolution image in the channel dimension. SCREAMING_SNAKE_CASE_: Optional[Any] = torch.cat([latents, image] , dim=1) SCREAMING_SNAKE_CASE_: List[Any] = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__) # predict the noise residual SCREAMING_SNAKE_CASE_: Union[str, Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__).sample # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE_: int = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__).prev_sample # decode the image latents with the VQVAE SCREAMING_SNAKE_CASE_: int = self.vqvae.decode(lowerCAmelCase__).sample SCREAMING_SNAKE_CASE_: Optional[Any] = torch.clamp(lowerCAmelCase__ , -1.0 , 1.0) SCREAMING_SNAKE_CASE_: Optional[int] = image / 2 + 0.5 SCREAMING_SNAKE_CASE_: Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE_: Dict = self.numpy_to_pil(lowerCAmelCase__) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase__)
13
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
330
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCamelCase : List[Any] = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""", # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''nat''' UpperCAmelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Dict , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : Dict=[3, 4, 6, 5] , UpperCAmelCase__ : Dict=[2, 4, 8, 16] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : List[Any]=3.0 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[Any]=1e-5 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Any , ) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = patch_size A__ = num_channels A__ = embed_dim A__ = depths A__ = len(UpperCAmelCase__) A__ = num_heads A__ = kernel_size A__ = mlp_ratio A__ = qkv_bias A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = drop_path_rate A__ = hidden_act A__ = layer_norm_eps A__ = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A__ = int(embed_dim * 2 ** (len(UpperCAmelCase__) - 1)) A__ = layer_scale_init_value A__ = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase__) + 1)] A__ , A__ = get_aligned_output_features_output_indices( out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names)
14
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
class UpperCAmelCase:
    """A fixed-capacity circular (ring-buffer) queue."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0  # index where the next element will be written
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # Peek at the front element without removing it.
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
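# Minimal usage sketch for the circular queue above; the values and the
# capacity of 3 are illustrative only, not part of the original module.
if __name__ == "__main__":
    queue = UpperCAmelCase(3)
    queue.enqueue("a").enqueue("b").enqueue("c")
    assert len(queue) == 3
    assert queue.dequeue() == "a"  # FIFO order
    queue.enqueue("d")  # the slot freed by the dequeue is reused (wrap-around)
    assert [queue.dequeue() for _ in range(len(queue))] == ["b", "c", "d"]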
15
def a__(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. its square ends in the number itself.

    >>> a__(5)
    True
    >>> a__(25)
    True
    >>> a__(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and of its square, one digit at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
330
0
"""Compute the difference between the square of the sum and the sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    # Closed-form expressions avoid looping over the first n natural numbers.
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
16
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
330
0
"""Find the sum of all numbers that equal the sum of the factorials of their digits."""

from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(number: int) -> int:
    # Sum of the factorials of the decimal digits of ``number``.
    return sum(DIGIT_FACTORIAL[d] for d in str(number))


def solution() -> int:
    # 7 * 9! + 1 is a safe upper bound: any number above 7 * 9! is larger than
    # the largest possible sum of its digit factorials, so it cannot qualify.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
17
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __lowerCamelCase : List[str] = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } __lowerCamelCase : Union[str, Any] = { '''google/realm-cc-news-pretrained-embedder''': 5_12, '''google/realm-cc-news-pretrained-encoder''': 5_12, '''google/realm-cc-news-pretrained-scorer''': 5_12, '''google/realm-cc-news-pretrained-openqa''': 5_12, '''google/realm-orqa-nq-openqa''': 5_12, '''google/realm-orqa-nq-reader''': 5_12, '''google/realm-orqa-wq-openqa''': 5_12, '''google/realm-orqa-wq-reader''': 5_12, } __lowerCamelCase : Optional[Any] = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-reader''': {'''do_lower_case''': 
True}, '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-reader''': {'''do_lower_case''': True}, } class a__ ( A__ ): A = VOCAB_FILES_NAMES A = PRETRAINED_VOCAB_FILES_MAP A = PRETRAINED_INIT_CONFIGURATION A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A = RealmTokenizer def __init__( self : Any,_A : Tuple=None,_A : Union[str, Any]=None,_A : str=True,_A : Tuple="[UNK]",_A : List[str]="[SEP]",_A : List[str]="[PAD]",_A : int="[CLS]",_A : Dict="[MASK]",_A : str=True,_A : int=None,**_A : List[str],): """simple docstring""" super().__init__( _A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,) SCREAMING_SNAKE_CASE_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase",_A ) != do_lower_case or normalizer_state.get("strip_accents",_A ) != strip_accents or normalizer_state.get("handle_chinese_chars",_A ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(_A,normalizer_state.pop("type" ) ) SCREAMING_SNAKE_CASE_ : int = do_lower_case SCREAMING_SNAKE_CASE_ : Union[str, Any] = strip_accents SCREAMING_SNAKE_CASE_ : int = tokenize_chinese_chars SCREAMING_SNAKE_CASE_ : str = normalizer_class(**_A ) SCREAMING_SNAKE_CASE_ : Tuple = do_lower_case def __UpperCamelCase ( self : Tuple,_A : List[Any],**_A : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = PaddingStrategy.MAX_LENGTH SCREAMING_SNAKE_CASE_ : Dict = text SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop("text_pair",_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop("return_tensors",_A ) SCREAMING_SNAKE_CASE_ : int = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(_A ): if batch_text_pair is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_text_pair[idx] else: SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Optional[int] = super().__call__(_A,_A,return_tensors=_A,**_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = encoded_candidates.get("input_ids" ) SCREAMING_SNAKE_CASE_ : Optional[Any] = encoded_candidates.get("attention_mask" ) SCREAMING_SNAKE_CASE_ : Optional[int] = encoded_candidates.get("token_type_ids" ) if encoded_input_ids is not None: output_data["input_ids"].append(_A ) if encoded_attention_mask is not None: output_data["attention_mask"].append(_A ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(_A ) SCREAMING_SNAKE_CASE_ : Any = {key: item for key, item in output_data.items() if len(_A ) != 0} return BatchEncoding(_A,tensor_type=_A ) def __UpperCamelCase ( self : Dict,_A : Optional[Any],_A : Union[str, Any]=None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCamelCase ( self : Optional[int],_A : List[int],_A : Optional[List[int]] = None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCamelCase ( self : Any,_A : str,_A : Optional[str] = None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self._tokenizer.model.save(_A,name=_A ) return tuple(_A )
18
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
330
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope lowerCamelCase_ = self.vocab_size - 1 def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict: lowerCamelCase_ = OpenAIGPTModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase ) lowerCamelCase_ = model(lowercase , token_type_ids=lowercase ) lowerCamelCase_ = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int: lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict: lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int: lowerCamelCase_ = self.num_labels lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): lowerCAmelCase__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowerCAmelCase__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowerCAmelCase__ = ( { 'feature-extraction': OpenAIGPTModel, 'text-classification': OpenAIGPTForSequenceClassification, 'text-generation': OpenAIGPTLMHeadModel, 'zero-shot': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any: lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , ) lowerCamelCase_ = inputs_dict["labels"] lowerCamelCase_ = inputs_dict["labels"] lowerCamelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , ) lowerCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase ) return inputs_dict def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ = OpenAIGPTModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 ) def SCREAMING_SNAKE_CASE_( self ) -> Any: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Tuple: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase ) @slow def SCREAMING_SNAKE_CASE_( self ) -> Any: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(lowercase ) lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is lowerCamelCase_ = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase ) self.assertListEqual(output_ids[0].tolist() , lowercase )
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
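The class above is the KL variational autoencoder used by latent-diffusion pipelines. A minimal usage sketch follows; the upstream class name AutoencoderKL and the checkpoint are assumptions for illustration, not stated in the code itself:

import torch
from diffusers import AutoencoderKL

# Load a standalone pretrained KL autoencoder; the checkpoint name is a placeholder.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()  # large inputs are routed through the tiled_encode/tiled_decode paths defined above

sample = torch.randn(1, 3, 1024, 1024)  # large enough to exceed the tile size on typical configs
with torch.no_grad():
    latents = vae.encode(sample).latent_dist.sample()   # AutoencoderKLOutput -> DiagonalGaussianDistribution
    reconstruction = vae.decode(latents).sample          # DecoderOutput.sample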
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float: return round(float(moles / volume ) * nfactor ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float: return round(float((moles * 0.0821 * temperature) / (volume) ) ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float: return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float: return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
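The four helpers above (presumably molarity_to_normality, moles_to_pressure, moles_to_volume and pressure_and_volume_to_temperature upstream; here they all share one obfuscated name, so only the last definition survives) apply the ideal gas law PV = nRT with R = 0.0821 L·atm/(mol·K). A worked check with illustrative numbers, written as plain arithmetic because of the name collision:

# 1 mol of gas at 300 K in a 24.63 L vessel:
print(round(float((1 * 0.0821 * 300) / 24.63)))   # pressure    -> 1 atm
print(round(float((1 * 0.0821 * 300) / 1)))       # volume      -> 25 L (round() coarsens the exact 24.63)
print(round(float((1 * 24.63) / (0.0821 * 1))))   # temperature -> 300 K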
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
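A hedged invocation sketch for the converter above; only the two flags come from the argparse definition, while the script filename and paths are placeholders:

# python convert_bigbird_pegasus_tf_to_pytorch.py \
#     --tf_ckpt_path /path/to/tf/checkpoint \
#     --save_dir ./bigbird-pegasus-converted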
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE : str = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[Any] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
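A minimal usage sketch of the Conversation container and pipeline defined above; the DialoGPT checkpoint is an illustrative choice, not something the code prescribes:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
conversation = Conversation("What is the best way to learn Python?")
conversation = chatbot(conversation)           # runs the preprocess/_forward/postprocess steps above
print(conversation.generated_responses[-1])    # latest bot reply, appended via append_response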
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE :str = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Optional[Any] = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :int = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :List[Any] = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :int = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( 
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
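As with the BigBird variant, a hedged invocation sketch; the two positional arguments match the argparse definition above, the script filename and paths are placeholders:

# python convert_pegasus_tf_to_pytorch.py /path/to/aeslc/model.ckpt-32000 ./pegasus-aeslc
# (when save_dir is omitted, the script derives it from the checkpoint's parent directory
#  name and writes under pegasus/<dataset>)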
'''simple docstring''' import cmath import math def snake_case_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> complex: UpperCAmelCase : Tuple = math.radians(_lowerCAmelCase ) UpperCAmelCase : str = math.radians(_lowerCAmelCase ) # Convert voltage and current to rectangular form UpperCAmelCase : Union[str, Any] = cmath.rect(_lowerCAmelCase , _lowerCAmelCase ) UpperCAmelCase : str = cmath.rect(_lowerCAmelCase , _lowerCAmelCase ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
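A quick worked check of the helper above (the magnitudes and angles are illustrative): a 100 V source at +30 degrees driving 5 A at -30 degrees yields purely real apparent power of about 500 VA, since the phase angles cancel in the product:

print(snake_case_(100, 5, 30, -30))   # -> approximately (500+0j)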
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
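A hedged invocation sketch for the fairseq-to-Transformers converter above; the flags mirror the argparse block, while the script filename and paths are placeholders:

# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/fairseq/checkpoint.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./unispeech-sat-converted
# add --not_finetuned to export a pretraining head instead of the CTC head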
from __future__ import annotations from random import random class SCREAMING_SNAKE_CASE__ : def __init__(self : int , a__ : int | None = None ): """simple docstring""" __snake_case = value __snake_case = random() __snake_case = None __snake_case = None def __repr__(self : List[Any] ): """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return f"""'{self.value}: {self.prior:.5}'""" else: return pformat( {f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 ) def __str__(self : int ): """simple docstring""" __snake_case = str(self.value ) + ''' ''' __snake_case = str(self.left or '''''' ) __snake_case = str(self.right or '''''' ) return value + left + right def lowerCamelCase__ ( snake_case_ : Node | None , snake_case_ : int ) -> tuple[Node | None, Node | None]: if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: __snake_case , __snake_case = split(root.left , snake_case_ ) return left, root else: __snake_case , __snake_case = split(root.right , snake_case_ ) return root, right def lowerCamelCase__ ( snake_case_ : Node | None , snake_case_ : Node | None ) -> Node | None: if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: __snake_case = merge(left.right , snake_case_ ) return left else: __snake_case = merge(snake_case_ , right.left ) return right def lowerCamelCase__ ( snake_case_ : Node | None , snake_case_ : int ) -> Node | None: __snake_case = Node(snake_case_ ) __snake_case , __snake_case = split(snake_case_ , snake_case_ ) return merge(merge(snake_case_ , snake_case_ ) , snake_case_ ) def lowerCamelCase__ ( snake_case_ : Node | None , snake_case_ : int ) -> Node | None: __snake_case , __snake_case = split(snake_case_ , value - 1 ) __snake_case , __snake_case = split(snake_case_ , snake_case_ ) return merge(snake_case_ , snake_case_ ) def lowerCamelCase__ ( snake_case_ : Node | None ) -> None: if not root: # None return else: inorder(root.left ) print(root.value , end=''',''' ) inorder(root.right ) def lowerCamelCase__ ( snake_case_ : Node | None , snake_case_ : str ) -> Node | None: for arg in args.split(): if arg[0] == "+": __snake_case = insert(snake_case_ , int(arg[1:] ) ) elif arg[0] == "-": __snake_case = erase(snake_case_ , int(arg[1:] ) ) else: print('''Unknown command''' ) return root def lowerCamelCase__ ( ) -> None: __snake_case = None print( '''enter numbers to create a tree, + value to add value into treap, ''' '''- value to erase all nodes with value. \'q\' to quit. ''' ) __snake_case = input() while args != "q": __snake_case = interact_treap(snake_case_ , snake_case_ ) print(snake_case_ ) __snake_case = input() print('''good by!''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
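Because the treap definitions above all share one obfuscated name, the sketch below is written against the helpers' original names as they appear at the call sites inside interact_treap (insert, erase, inorder); the values are illustrative:

root = None
for value in (5, 3, 8, 1):
    root = insert(root, value)   # insert returns the new treap root
inorder(root)                    # prints 1,3,5,8, in sorted order
root = erase(root, 3)            # drop every node holding the value 3
inorder(root)                    # prints 1,5,8,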
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING a_ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return {}, {}, {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = load_image(__UpperCAmelCase ) __lowerCamelCase = image.size __lowerCamelCase = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.model(**__UpperCAmelCase ) return model_outputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = model_outputs.predicted_depth __lowerCamelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__UpperCAmelCase ) __lowerCamelCase = prediction.squeeze().cpu().numpy() __lowerCamelCase = (output * 255 / np.max(__UpperCAmelCase )).astype('''uint8''' ) __lowerCamelCase = Image.fromarray(__UpperCAmelCase ) __lowerCamelCase = {} __lowerCamelCase = predicted_depth __lowerCamelCase = depth return output_dict
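A usage sketch for the depth-estimation pipeline above; the DPT checkpoint and image URL are illustrative assumptions:

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"].save("depth.png")        # PIL image built in the postprocess step above
print(outputs["predicted_depth"].shape)   # raw torch tensor of per-pixel depth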
"""simple docstring""" import re import string import numpy as np import datasets UpperCAmelCase__ : List[Any] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' UpperCAmelCase__ : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' UpperCAmelCase__ : Any = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ (datasets.Metric ): """simple docstring""" def __magic_name__ (self ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": 
datasets.Value("""string""" , id="""sequence""" ), } ) , reference_urls=[] , ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , ) -> Optional[int]: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([re.sub(SCREAMING_SNAKE_CASE__ , """""" , SCREAMING_SNAKE_CASE__ ) for x in predictions] ) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([re.sub(SCREAMING_SNAKE_CASE__ , """""" , SCREAMING_SNAKE_CASE__ ) for x in references] ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE__ ) if ignore_case: SCREAMING_SNAKE_CASE__ : Optional[int] = np.char.lower(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.char.lower(SCREAMING_SNAKE_CASE__ ) if ignore_punctuation: SCREAMING_SNAKE_CASE__ : Optional[Any] = string.punctuation.maketrans("""""" , """""" , string.punctuation ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) if ignore_numbers: SCREAMING_SNAKE_CASE__ : Optional[Any] = string.digits.maketrans("""""" , """""" , string.digits ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = np.char.translate(SCREAMING_SNAKE_CASE__ , table=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = predictions == references return {"exact_match": np.mean(SCREAMING_SNAKE_CASE__ ) * 1_00}
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
0
import requests


def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    _A : List[str] = {"""Content-Type""": """application/json"""}
    _A : str = requests.post(snake_case_,json={"""text""": message_body},headers=snake_case_ )
    if response.status_code != 200:
        _A : Union[str, Any] = (
            """Request to slack returned an error """
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(snake_case_ )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
26
from __future__ import annotations from typing import Generic, TypeVar a_ = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = data __lowerCamelCase = self __lowerCamelCase = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # map from node name to the node object __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # create a new set with x as its member __lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # find the set x belongs to (with path-compression) __lowerCamelCase = self.map[data] if elem_ref != elem_ref.parent: __lowerCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __lowerCamelCase = nodea else: __lowerCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # add a node ONLY if its not present in the graph if node not in self.connections: __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # add an edge with the given weight self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) __lowerCamelCase = weight __lowerCamelCase = weight def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __UpperCAmelCase : x[2] ) # creating the disjoint set __lowerCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__UpperCAmelCase ) # MST generation __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index] index += 1 __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase ) return graph
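A small, standalone sketch of the union-find behaviour (path compression plus union by rank) that the disjoint-set classes above rely on; the names here are illustrative, not the mangled class names from the row:

parent = {}
rank = {}

def make_set(x):
    parent[x] = x
    rank[x] = 0

def find(x):
    if parent[x] != x:
        parent[x] = find(parent[x])  # path compression
    return parent[x]

def union(a, b):
    root_a, root_b = find(a), find(b)
    if root_a == root_b:
        return
    if rank[root_a] < rank[root_b]:
        root_a, root_b = root_b, root_a
    parent[root_b] = root_a  # attach the shallower tree under the deeper one
    if rank[root_a] == rank[root_b]:
        rank[root_a] += 1

for v in "abcd":
    make_set(v)
union("a", "b")
union("c", "d")
print(find("b") == find("a"), find("a") == find("c"))  # True False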
330
0
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): __a : Optional[Any] = tmp_path / 'file.csv' __a : Union[str, Any] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : str = tmp_path / 'malformed_file.csv' __a : int = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ): __a : Optional[Any] = tmp_path / 'csv_with_image.csv' __a : Dict = textwrap.dedent( F"""\ image {image_file} """ ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Union[str, Any] = tmp_path / 'csv_with_label.csv' __a : Any = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): __a : Dict = tmp_path / 'csv_with_int_list.csv' __a : Tuple = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): __a : int = Csv() __a : str = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(_SCREAMING_SNAKE_CASE , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message for record in caplog.records ) @require_pil def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1] __a : Tuple = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) __a : Any = csv._generate_tables([[csv_file_with_image]] ) __a : int = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() __a : Any = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __a : Tuple = f.read().splitlines()[1:] __a : Optional[int] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) __a : List[str] = csv._generate_tables([[csv_file_with_label]] ) __a : Dict = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() __a : int = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(_SCREAMING_SNAKE_CASE ) for label in labels] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): __a : str = 
Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda _SCREAMING_SNAKE_CASE : [int(_SCREAMING_SNAKE_CASE ) for i in x.split()]} ) __a : Any = csv._generate_tables([[csv_file_with_int_list]] ) __a : Any = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) __a : Tuple = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
27
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
'''simple docstring'''
from __future__ import annotations

_lowerCamelCase : str = [True] * 100_0001
_lowerCamelCase : str = 2
while i * i <= 100_0000:
    if seive[i]:
        for j in range(i * i, 100_0001, i):
            _lowerCamelCase : Optional[int] = False
    i += 1


def __lowerCamelCase ( A__ ) -> bool:
    """simple docstring"""
    return seive[n]


def __lowerCamelCase ( A__ ) -> bool:
    """simple docstring"""
    return any(digit in '02468' for digit in str(A__ ) )


def __lowerCamelCase ( A__ = 1_000_000 ) -> list[int]:
    """simple docstring"""
    UpperCamelCase = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(A__ ) and not contains_an_even_digit(A__ ):
            UpperCamelCase = str(A__ )
            UpperCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(A__ ) )]
            if all(is_prime(A__ ) for i in list_nums ):
                result.append(A__ )
    return result


def __lowerCamelCase ( ) -> int:
    """simple docstring"""
    return len(find_circular_primes() )


if __name__ == "__main__":
    print(f'''{len(find_circular_primes()) = }''')
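A quick worked check of the rotation idea above, on illustrative values: 197 stays prime under every digit rotation, while 23 fails because its rotation 32 is composite.

n = "197"
rotations = [int(n[i:] + n[:i]) for i in range(len(n))]
print(rotations)  # [197, 971, 719] -- all prime, so 197 is a circular prime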
28
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Tuple = IFInpaintingPipeline _snake_case : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _snake_case : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def __UpperCAmelCase ( self ) -> Dict: return self._get_dummy_components() def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> int: if str(_UpperCamelCase ).startswith('mps' ): UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) UpperCAmelCase_ : List[str] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self ) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __UpperCAmelCase ( self ) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __UpperCAmelCase ( self ) -> Optional[int]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __UpperCAmelCase ( self ) -> Optional[int]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __UpperCAmelCase ( self ) -> Dict: self._test_save_load_local() def __UpperCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
29
from string import ascii_lowercase, ascii_uppercase


def a__ ( sentence : str ):
    if not sentence:
        return ""
    # map each lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase ,ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
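With ascii_lowercase zipped to ascii_uppercase, the helper behaves like this (examples are illustrative):

print(a__("hello world"))  # Hello world
print(a__("123 hello"))    # 123 hello (first character has no uppercase mapping)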
330
0
import mpmath # for roots of unity import numpy as np class lowercase__: """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Tuple=None ) -> List[Any]: # Input as list lowercase_ = list(poly_a or [0] )[:] lowercase_ = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() lowercase_ = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() lowercase_ = len(self.polyB ) # Add 0 to make lengths equal a power of 2 lowercase_ = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform lowercase_ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product lowercase_ = self.__multiply() def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: lowercase_ = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE_ ) <= 1: return dft[0] # lowercase_ = self.c_max_length // 2 while next_ncol > 0: lowercase_ = [[] for i in range(SCREAMING_SNAKE_CASE_ )] lowercase_ = self.root**next_ncol # First half of next step lowercase_ = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE_ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step lowercase_ = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(SCREAMING_SNAKE_CASE_ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update lowercase_ = new_dft lowercase_ = next_ncol // 2 return dft[0] def _lowercase ( self : int ) -> Dict: lowercase_ = self.__dft('''A''' ) lowercase_ = self.__dft('''B''' ) lowercase_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT lowercase_ = 2 while next_ncol <= self.c_max_length: lowercase_ = [[] for i in range(SCREAMING_SNAKE_CASE_ )] lowercase_ = self.root ** (next_ncol // 2) lowercase_ = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update lowercase_ = new_inverse_c next_ncol *= 2 # Unpack lowercase_ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : str ) -> Optional[int]: lowercase_ = '''A = ''' + ''' + '''.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) lowercase_ = '''B = ''' + ''' + '''.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) lowercase_ = '''A*B = ''' + ''' + '''.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
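The class above implements FFT-based polynomial multiplication, but its mangled __init__ (duplicate parameter names) will not compile as written. An independent numpy sketch of the same technique, checked against plain convolution, is below; the function name and inputs are illustrative only.

import numpy as np

def fft_poly_multiply(a, b):
    n = len(a) + len(b) - 1                 # length of the product polynomial
    size = 1 << (n - 1).bit_length()        # next power of two
    product = np.fft.irfft(np.fft.rfft(a, size) * np.fft.rfft(b, size), size)[:n]
    return np.rint(product).astype(int)

print(fft_poly_multiply([1, 2, 3], [3, 4]))  # [ 3 10 17 12]
print(np.convolve([1, 2, 3], [3, 4]))        # same coefficients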
30
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
330
0
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __SCREAMING_SNAKE_CASE : Any = imread(R"""digital_image_processing/image_data/lena_small.jpg""") __SCREAMING_SNAKE_CASE : Tuple = cvtColor(img, COLOR_BGR2GRAY) def UpperCamelCase_ ( ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = cn.convert_to_negative(_UpperCAmelCase ) # assert negative_img array for at least one True assert negative_img.any() def UpperCamelCase_ ( ) -> Optional[Any]: """simple docstring""" with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img: # Work around assertion for response assert str(cc.change_contrast(_UpperCAmelCase , 110 ) ).startswith( "<PIL.Image.Image image mode=RGB size=100x100 at" ) def UpperCamelCase_ ( ) -> Tuple: """simple docstring""" _UpperCAmelCase : int = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def UpperCamelCase_ ( ) -> Tuple: """simple docstring""" _UpperCAmelCase : str = imread("digital_image_processing/image_data/lena_small.jpg" , 0 ) # assert ambiguous array for all == True assert canny_img.all() _UpperCAmelCase : str = canny.canny(_UpperCAmelCase ) # assert canny array for at least one True assert canny_array.any() def UpperCamelCase_ ( ) -> Optional[Any]: """simple docstring""" assert gg.gaussian_filter(_UpperCAmelCase , 5 , sigma=0.9 ).all() def UpperCamelCase_ ( ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[str] = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] ) _UpperCAmelCase : List[str] = conv.img_convolve(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase ) assert res.any() def UpperCamelCase_ ( ) -> Optional[Any]: """simple docstring""" assert med.median_filter(_UpperCAmelCase , 3 ).any() def UpperCamelCase_ ( ) -> List[str]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Any = sob.sobel_filter(_UpperCAmelCase ) assert grad.any() and theta.any() def UpperCamelCase_ ( ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = sp.make_sepia(_UpperCAmelCase , 20 ) assert sepia.all() def UpperCamelCase_ ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> Tuple: """simple docstring""" _UpperCAmelCase : Tuple = bs.Burkes(imread(_UpperCAmelCase , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def UpperCamelCase_ ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[str]: """simple docstring""" _UpperCAmelCase : Tuple = rs.NearestNeighbour(imread(_UpperCAmelCase , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def UpperCamelCase_ ( ) -> Tuple: """simple docstring""" _UpperCAmelCase : str = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to 
grayscale. _UpperCAmelCase : Tuple = imread(_UpperCAmelCase , 0 ) # Test for get_neighbors_pixel function() return not None _UpperCAmelCase : Any = 0 _UpperCAmelCase : str = 0 _UpperCAmelCase : Tuple = image[x_coordinate][y_coordinate] _UpperCAmelCase : Dict = lbp.get_neighbors_pixel( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image _UpperCAmelCase : Dict = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): _UpperCAmelCase : List[str] = lbp.local_binary_value(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) assert lbp_image.any()
31
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ["""TimmBackbone"""]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


UpperCAmelCase_ : str = {
    'configuration_altclip': [
        'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AltCLIPConfig',
        'AltCLIPTextConfig',
        'AltCLIPVisionConfig',
    ],
    'processing_altclip': ['AltCLIPProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ : Any = [
        'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AltCLIPPreTrainedModel',
        'AltCLIPModel',
        'AltCLIPTextModel',
        'AltCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
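The two __init__ rows above both rely on the lazy-import pattern; a rough standalone sketch of the idea (not the actual transformers _LazyModule implementation) looks like this:

import importlib

class LazySketch:
    """Defer real imports until an attribute is first accessed."""

    def __init__(self, package, import_structure):
        self._package = package
        self._import_structure = import_structure

    def __getattr__(self, name):
        for submodule, symbols in self._import_structure.items():
            if name in symbols:
                module = importlib.import_module(f"{self._package}.{submodule}")
                return getattr(module, name)
        raise AttributeError(name)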
32
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
330
0
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __A : Union[str, Any] = logging.get_logger(__name__) # General docstring __A : Tuple = '''MobileNetV1Config''' # Base docstring __A : Union[str, Any] = '''google/mobilenet_v1_1.0_224''' __A : Union[str, Any] = [1, 1_024, 7, 7] # Image classification docstring __A : Optional[Any] = '''google/mobilenet_v1_1.0_224''' __A : List[Any] = '''tabby, tabby cat''' __A : Union[str, Any] = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowercase ( __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict=None ): lowercase_ : str = {} if isinstance(__snake_case , __snake_case ): lowercase_ : Union[str, Any] = model.mobilenet_va else: lowercase_ : Optional[Any] = model lowercase_ : Union[str, Any] = '''MobilenetV1/Conv2d_0/''' lowercase_ : Union[str, Any] = backbone.conv_stem.convolution.weight lowercase_ : Optional[Any] = backbone.conv_stem.normalization.bias lowercase_ : Union[str, Any] = backbone.conv_stem.normalization.weight lowercase_ : Any = backbone.conv_stem.normalization.running_mean lowercase_ : int = backbone.conv_stem.normalization.running_var for i in range(1_3 ): lowercase_ : Optional[int] = i + 1 lowercase_ : Union[str, Any] = i * 2 lowercase_ : Optional[Any] = backbone.layer[pt_index] lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' lowercase_ : str = pointer.convolution.weight lowercase_ : int = pointer.normalization.bias lowercase_ : Any = pointer.normalization.weight lowercase_ : Dict = pointer.normalization.running_mean lowercase_ : Union[str, Any] = pointer.normalization.running_var lowercase_ : Any = backbone.layer[pt_index + 1] lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' lowercase_ : int = pointer.convolution.weight lowercase_ : str = pointer.normalization.bias lowercase_ : Tuple = pointer.normalization.weight lowercase_ : Dict = pointer.normalization.running_mean lowercase_ : Any = pointer.normalization.running_var if isinstance(__snake_case , __snake_case ): lowercase_ : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/''' lowercase_ : Any = model.classifier.weight lowercase_ : Optional[int] = model.classifier.bias return tf_to_pt_map def lowercase ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see ''' '''https://www.tensorflow.org/install/ for installation instructions.''' ) raise # Load weights from TF model lowercase_ : Tuple = tf.train.list_variables(__snake_case ) lowercase_ : int = {} for name, shape in init_vars: logger.info(F'''Loading TF weight {name} with shape {shape}''' ) lowercase_ : Optional[Any] = tf.train.load_variable(__snake_case , __snake_case ) lowercase_ : Optional[int] = array # Build TF to PyTorch weights loading map lowercase_ : Any = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case ) for name, pointer in tf_to_pt_map.items(): logger.info(F'''Importing {name}''' ) if name not in tf_weights: logger.info(F'''{name} not in tf pre-trained weights, skipping''' ) continue lowercase_ : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('''Transposing depthwise''' ) lowercase_ : Any = np.transpose(__snake_case , (2, 3, 0, 1) ) elif "weights" in name: logger.info('''Transposing''' ) if len(pointer.shape ) == 2: # copying into linear layer lowercase_ : Optional[int] = array.squeeze().transpose() else: lowercase_ : Optional[int] = np.transpose(__snake_case , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' ) lowercase_ : str = torch.from_numpy(__snake_case ) tf_weights.pop(__snake_case , __snake_case ) tf_weights.pop(name + '''/RMSProp''' , __snake_case ) tf_weights.pop(name + '''/RMSProp_1''' , __snake_case ) tf_weights.pop(name + '''/ExponentialMovingAverage''' , __snake_case ) logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' ) return model def lowercase ( __snake_case : torch.Tensor , __snake_case : nn.Convad ): lowercase_ , lowercase_ : Optional[int] = features.shape[-2:] lowercase_ , lowercase_ : str = conv_layer.stride lowercase_ , lowercase_ : Tuple = conv_layer.kernel_size if in_height % stride_height == 0: lowercase_ : Dict = max(kernel_height - stride_height , 0 ) else: lowercase_ : List[Any] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: lowercase_ : str = max(kernel_width - stride_width , 0 ) else: lowercase_ : int = max(kernel_width - (in_width % stride_width) , 0 ) lowercase_ : int = pad_along_width // 2 lowercase_ : Union[str, Any] = pad_along_width - pad_left lowercase_ : Tuple = pad_along_height // 2 lowercase_ : List[str] = pad_along_height - pad_top lowercase_ : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__snake_case , __snake_case , '''constant''' , 0.0 ) class _UpperCAmelCase ( nn.Module ): def __init__( self : List[Any] , A : MobileNetVaConfig , A : int , A : int , A : int , A : Optional[int] = 1 , A : Optional[int] = 1 , A : bool = False , A : Optional[bool] = True , A : Optional[bool or str] = True , ) -> None: super().__init__() lowercase_ : int = config if in_channels % groups != 0: raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) lowercase_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) lowercase_ : int = nn.Convad( in_channels=A , out_channels=A , kernel_size=A , stride=A , padding=A , groups=A , bias=A , padding_mode='''zeros''' , ) if use_normalization: lowercase_ : Optional[Any] = nn.BatchNormad( num_features=A , 
eps=config.layer_norm_eps , momentum=0.9997 , affine=A , track_running_stats=A , ) else: lowercase_ : Union[str, Any] = None if use_activation: if isinstance(A , A ): lowercase_ : str = ACTaFN[use_activation] elif isinstance(config.hidden_act , A ): lowercase_ : Any = ACTaFN[config.hidden_act] else: lowercase_ : Tuple = config.hidden_act else: lowercase_ : Tuple = None def A ( self : str , A : torch.Tensor ) -> torch.Tensor: if self.config.tf_padding: lowercase_ : List[Any] = apply_tf_padding(A , self.convolution ) lowercase_ : Optional[int] = self.convolution(A ) if self.normalization is not None: lowercase_ : Union[str, Any] = self.normalization(A ) if self.activation is not None: lowercase_ : Optional[int] = self.activation(A ) return features class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Optional[int] = MobileNetVaConfig SCREAMING_SNAKE_CASE_ : int = load_tf_weights_in_mobilenet_va SCREAMING_SNAKE_CASE_ : Optional[Any] = "mobilenet_v1" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "pixel_values" SCREAMING_SNAKE_CASE_ : List[str] = False def A ( self : Any , A : Union[nn.Linear, nn.Convad] ) -> None: if isinstance(A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(A , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __A : Union[str, Any] = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __A : List[str] = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, _A , ) class _UpperCAmelCase ( _A ): def __init__( self : str , A : MobileNetVaConfig , A : bool = True ) -> int: super().__init__(A ) lowercase_ : Union[str, Any] = config lowercase_ : List[str] = 32 lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth ) lowercase_ : Union[str, Any] = MobileNetVaConvLayer( A , in_channels=config.num_channels , out_channels=A , kernel_size=3 , stride=2 , ) lowercase_ : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] lowercase_ : List[Any] = nn.ModuleList() for i in range(13 ): lowercase_ : Dict = out_channels if strides[i] == 2 or i == 0: depth *= 2 lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( A , in_channels=A , out_channels=A , kernel_size=3 , stride=strides[i] , groups=A , ) ) self.layer.append( MobileNetVaConvLayer( A , in_channels=A , out_channels=A , kernel_size=1 , ) ) lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def A ( self : Any , A : Optional[Any] ) -> Optional[int]: raise NotImplementedError @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: lowercase_ : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) lowercase_ : List[str] = self.conv_stem(A ) lowercase_ : Dict = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): lowercase_ : Optional[int] = layer_module(A ) if output_hidden_states: lowercase_ : str = all_hidden_states + (hidden_states,) lowercase_ : Tuple = hidden_states if self.pooler is not None: lowercase_ : Dict = torch.flatten(self.pooler(A ) , start_dim=1 ) else: lowercase_ : Optional[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A , pooler_output=A , hidden_states=A , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , _A , ) class _UpperCAmelCase ( _A ): def __init__( self : List[str] , A : MobileNetVaConfig ) -> None: super().__init__(A ) lowercase_ : int = config.num_labels lowercase_ : List[str] = MobileNetVaModel(A ) lowercase_ : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head lowercase_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=A ) lowercase_ : int = nn.Linear(A , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A ( self : Optional[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowercase_ : List[Any] = self.mobilenet_va(A , output_hidden_states=A , return_dict=A ) lowercase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] lowercase_ : Dict = self.classifier(self.dropout(A ) ) lowercase_ : int = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase_ : List[str] = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase_ : Optional[Any] = '''single_label_classification''' else: lowercase_ : Tuple = '''multi_label_classification''' if self.config.problem_type == "regression": lowercase_ : str = MSELoss() if self.num_labels == 1: lowercase_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase_ : List[str] = loss_fct(A , A ) elif self.config.problem_type == "single_label_classification": lowercase_ : List[Any] = CrossEntropyLoss() lowercase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase_ : str = BCEWithLogitsLoss() lowercase_ : List[Any] = loss_fct(A , A ) if not return_dict: lowercase_ : Tuple = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=A , logits=A , hidden_states=outputs.hidden_states , )
33
def a__ ( _UpperCamelCase : int ):
    # Round the floating-point cube root to the nearest integer so exact cubes
    # such as 27 are not rejected because of rounding error; the original body
    # referenced undefined names (`n`, `val`), which is fixed here.
    __lowerCamelCase = round(_UpperCamelCase ** (1 / 3))
    return (__lowerCamelCase * __lowerCamelCase * __lowerCamelCase) == _UpperCamelCase


if __name__ == "__main__":
    print(a__(27))
    print(a__(4))
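# A minimal float-free sketch (not part of the original sample): an integer
# binary search for the cube root avoids floating-point rounding entirely for
# non-negative integers. The function name below is hypothetical/illustrative.
def is_perfect_cube_int(n: int) -> bool:
    if n < 0:
        return False
    lo, hi = 0, max(1, n)
    while lo <= hi:  # binary search for an integer cube root
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False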
330
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A ={ 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A =['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A =[ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A =[ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A =['LayoutLMv3FeatureExtractor'] A =['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
34
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
330
0
'''simple docstring''' import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json", } class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = "align_text_model" def __init__( self : Dict , snake_case_ : Any=30_522 , snake_case_ : Tuple=768 , snake_case_ : Any=12 , snake_case_ : Union[str, Any]=12 , snake_case_ : List[Any]=3_072 , snake_case_ : Dict="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : str=0.1 , snake_case_ : List[str]=512 , snake_case_ : str=2 , snake_case_ : Optional[Any]=0.02 , snake_case_ : Optional[Any]=1E-1_2 , snake_case_ : List[Any]=0 , snake_case_ : Union[str, Any]="absolute" , snake_case_ : List[Any]=True , **snake_case_ : List[Any] , ): super().__init__(**snake_case_ ) snake_case__ : str = vocab_size snake_case__ : str = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : Union[str, Any] = num_attention_heads snake_case__ : Tuple = hidden_act snake_case__ : int = intermediate_size snake_case__ : Tuple = hidden_dropout_prob snake_case__ : Dict = attention_probs_dropout_prob snake_case__ : Tuple = max_position_embeddings snake_case__ : Tuple = type_vocab_size snake_case__ : Dict = initializer_range snake_case__ : str = layer_norm_eps snake_case__ : str = position_embedding_type snake_case__ : Dict = use_cache snake_case__ : Tuple = pad_token_id @classmethod def lowerCamelCase ( cls : Union[str, Any] , snake_case_ : Union[str, os.PathLike] , **snake_case_ : Optional[int] ): cls._set_token_in_kwargs(snake_case_ ) snake_case__ , snake_case__ : List[str] = cls.get_config_dict(snake_case_ , **snake_case_ ) # get the text config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": snake_case__ : Optional[Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(snake_case_ , **snake_case_ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = "align_vision_model" def __init__( self : Union[str, Any] , snake_case_ : int = 3 , snake_case_ : int = 600 , snake_case_ : float = 2.0 , snake_case_ : float = 3.1 , snake_case_ : int = 8 , snake_case_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , snake_case_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , snake_case_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , snake_case_ : List[int] = [] , snake_case_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , snake_case_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , snake_case_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , snake_case_ : float = 0.25 , snake_case_ : str = "swish" , snake_case_ : int = 2_560 , snake_case_ : str = "mean" , snake_case_ : float = 0.02 , snake_case_ : float = 0.001 , snake_case_ : float = 0.99 , snake_case_ : float = 0.2 , **snake_case_ : Optional[int] , ): super().__init__(**snake_case_ ) snake_case__ : int = num_channels snake_case__ : Optional[int] = image_size snake_case__ : int = width_coefficient snake_case__ : str = depth_coefficient snake_case__ : Optional[int] = depth_divisor snake_case__ : Any = kernel_sizes snake_case__ : Optional[int] = in_channels snake_case__ : Optional[Any] = out_channels snake_case__ : Tuple = depthwise_padding snake_case__ : Tuple = strides snake_case__ : str = num_block_repeats snake_case__ : Any = expand_ratios snake_case__ : Any = squeeze_expansion_ratio snake_case__ : Tuple = hidden_act snake_case__ : int = hidden_dim snake_case__ : int = pooling_type snake_case__ : List[Any] = initializer_range snake_case__ : Optional[Any] = batch_norm_eps snake_case__ : Optional[int] = batch_norm_momentum snake_case__ : int = drop_connect_rate snake_case__ : int = sum(snake_case_ ) * 4 @classmethod def lowerCamelCase ( cls : str , snake_case_ : Union[str, os.PathLike] , **snake_case_ : List[Any] ): cls._set_token_in_kwargs(snake_case_ ) snake_case__ , snake_case__ : Optional[Any] = cls.get_config_dict(snake_case_ , **snake_case_ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": snake_case__ : Optional[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(snake_case_ , **snake_case_ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = "align" lowercase = True def __init__( self : int , snake_case_ : List[Any]=None , snake_case_ : List[str]=None , snake_case_ : Tuple=640 , snake_case_ : List[Any]=1.0 , snake_case_ : List[str]=0.02 , **snake_case_ : Any , ): super().__init__(**snake_case_ ) if text_config is None: snake_case__ : Optional[int] = {} logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" ) if vision_config is None: snake_case__ : Optional[Any] = {} logger.info("""vision_config is None. 
Initializing the AlignVisionConfig with default values.""" ) snake_case__ : List[str] = AlignTextConfig(**snake_case_ ) snake_case__ : Optional[Any] = AlignVisionConfig(**snake_case_ ) snake_case__ : Dict = projection_dim snake_case__ : List[str] = temperature_init_value snake_case__ : Tuple = initializer_range @classmethod def lowerCamelCase ( cls : Union[str, Any] , snake_case_ : AlignTextConfig , snake_case_ : AlignVisionConfig , **snake_case_ : Optional[int] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ ) def lowerCamelCase ( self : List[Any] ): snake_case__ : int = copy.deepcopy(self.__dict__ ) snake_case__ : Union[str, Any] = self.text_config.to_dict() snake_case__ : List[str] = self.vision_config.to_dict() snake_case__ : List[str] = self.__class__.model_type return output
35
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
330
0
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCAmelCase_ : def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=64, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : Optional[int] = batch_size _lowerCAmelCase : Optional[int] = seq_length _lowerCAmelCase : List[Any] = is_training _lowerCAmelCase : Any = use_input_mask _lowerCAmelCase : List[Any] = use_token_type_ids _lowerCAmelCase : str = use_labels _lowerCAmelCase : Optional[int] = vocab_size _lowerCAmelCase : Optional[Any] = hidden_size _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : str = num_attention_heads _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : Optional[int] = hidden_dropout_prob _lowerCAmelCase : List[Any] = attention_probs_dropout_prob _lowerCAmelCase : List[Any] = max_position_embeddings _lowerCAmelCase : Union[str, Any] = type_vocab_size _lowerCAmelCase : str = type_sequence_label_size _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : Dict = num_labels _lowerCAmelCase : Any = num_choices _lowerCAmelCase : Optional[int] = scope _lowerCAmelCase : List[str] = vocab_size - 1 def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) _lowerCAmelCase : Union[str, Any] = None if self.use_input_mask: _lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCAmelCase : Any = None if self.use_labels: _lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels) _lowerCAmelCase : Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def snake_case__ ( self): '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__a, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = self.prepare_config_and_inputs() _lowerCAmelCase : str = True return config, input_ids, input_mask, token_labels def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : List[str] = GPTNeoXModel(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Optional[int] = model(__a, attention_mask=__a) _lowerCAmelCase : str = 
model(__a) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : List[Any] = True _lowerCAmelCase : List[Any] = GPTNeoXModel(__a) model.to(__a) model.eval() _lowerCAmelCase : List[Any] = model(__a, attention_mask=__a) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def snake_case__ ( self, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : int = GPTNeoXForCausalLM(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Tuple = model(__a, attention_mask=__a, labels=__a) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def snake_case__ ( self, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.num_labels _lowerCAmelCase : Union[str, Any] = GPTNeoXForQuestionAnswering(__a) model.to(__a) model.eval() _lowerCAmelCase : Tuple = model(__a, attention_mask=__a) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def snake_case__ ( self, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : str = self.num_labels _lowerCAmelCase : str = GPTNeoXForSequenceClassification(__a) model.to(__a) model.eval() _lowerCAmelCase : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size) _lowerCAmelCase : Dict = model(__a, attention_mask=__a, labels=__a) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def snake_case__ ( self, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : str = self.num_labels _lowerCAmelCase : Dict = GPTNeoXForTokenClassification(__a) model.to(__a) model.eval() _lowerCAmelCase : str = model(__a, attention_mask=__a, labels=__a) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Tuple = True _lowerCAmelCase : Dict = GPTNeoXForCausalLM(config=__a) model.to(__a) model.eval() # first forward pass _lowerCAmelCase : List[Any] = model(__a, attention_mask=__a, use_cache=__a) _lowerCAmelCase : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowerCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size) _lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and _lowerCAmelCase : int = torch.cat([input_ids, next_tokens], dim=-1) _lowerCAmelCase : str = torch.cat([input_mask, next_mask], dim=-1) _lowerCAmelCase : List[str] = model(__a, attention_mask=__a, output_hidden_states=__a) _lowerCAmelCase : List[str] = output_from_no_past["hidden_states"][0] _lowerCAmelCase : List[Any] = model( __a, attention_mask=__a, past_key_values=__a, output_hidden_states=__a, )["hidden_states"][0] # select random slice _lowerCAmelCase : int = ids_tensor((1,), output_from_past.shape[-1]).item() _lowerCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a, __a, atol=1E-3)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : 
Dict = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = config_and_inputs _lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( a , a , a , unittest.TestCase): lowerCamelCase__ = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase__ = (GPTNeoXForCausalLM,) if is_torch_available() else () lowerCamelCase__ = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = GPTNeoXModelTester(self) _lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=64, num_attention_heads=8) def snake_case__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a, __a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__a, __a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() _lowerCAmelCase : List[str] = None self.model_tester.create_and_check_model_as_decoder(__a, __a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__a, __a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a) @unittest.skip(reason="Feed forward chunking is not implemented") def snake_case__ ( self): '''simple docstring''' pass @parameterized.expand([("linear",), ("dynamic",)]) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Optional[Any] = ids_tensor([1, 10], config.vocab_size) _lowerCAmelCase 
: List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase : List[Any] = GPTNeoXModel(__a) original_model.to(__a) original_model.eval() _lowerCAmelCase : Union[str, Any] = original_model(__a).last_hidden_state _lowerCAmelCase : Union[str, Any] = original_model(__a).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase : int = {"type": scaling_type, "factor": 10.0} _lowerCAmelCase : Optional[int] = GPTNeoXModel(__a) scaled_model.to(__a) scaled_model.eval() _lowerCAmelCase : Optional[Any] = scaled_model(__a).last_hidden_state _lowerCAmelCase : Optional[Any] = scaled_model(__a).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__a, __a, atol=1E-5)) else: self.assertFalse(torch.allclose(__a, __a, atol=1E-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(__a, __a, atol=1E-5)) @require_torch class UpperCAmelCase_ ( unittest.TestCase): @slow def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped") for checkpointing in [True, False]: _lowerCAmelCase : List[str] = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped") if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(__a) _lowerCAmelCase : str = tokenizer("My favorite food is", return_tensors="pt").to(__a) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 _lowerCAmelCase : str = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure" _lowerCAmelCase : Any = model.generate(**__a, do_sample=__a, max_new_tokens=20) _lowerCAmelCase : str = tokenizer.batch_decode(__a)[0] self.assertEqual(__a, __a)
36
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
'''simple docstring''' import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _lowerCAmelCase = logging.get_logger(__name__) class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> str: if not conversation_id: lowerCAmelCase__ : List[str] = uuid.uuida() if past_user_inputs is None: lowerCAmelCase__ : List[Any] = [] if generated_responses is None: lowerCAmelCase__ : str = [] lowerCAmelCase__ : uuid.UUID = conversation_id lowerCAmelCase__ : List[str] = past_user_inputs lowerCAmelCase__ : List[str] = generated_responses lowerCAmelCase__ : Optional[str] = text def __eq__( self ,__UpperCAmelCase ) -> Dict: if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Optional[Any]: if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) lowerCAmelCase__ : Optional[int] = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: lowerCAmelCase__ : Optional[Any] = text def UpperCAmelCase_ ( self ) -> List[Any]: if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowerCAmelCase__ : Union[str, Any] = None def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: self.generated_responses.append(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ) -> Tuple: lowerCAmelCase__ : Tuple = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): lowerCAmelCase__ : Any = """user""" if is_user else """bot""" output += F"""{name} >> {text} \n""" return output @add_end_docstrings( SCREAMING_SNAKE_CASE_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''' , ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: lowerCAmelCase__ : Tuple = self.tokenizer.eos_token def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : List[Any] = {} lowerCAmelCase__ : Optional[int] = {} lowerCAmelCase__ : List[str] = {} if min_length_for_response is not None: lowerCAmelCase__ : Any = min_length_for_response if minimum_tokens is not None: lowerCAmelCase__ : Optional[int] = minimum_tokens if "max_length" in generate_kwargs: lowerCAmelCase__ : Optional[Any] = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowerCAmelCase__ : int = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ,**__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase ,num_workers=__UpperCAmelCase ,**__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=32 ) -> Dict[str, Any]: if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
""" """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ): lowerCAmelCase__ : str = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowerCAmelCase__ : List[Any] = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": lowerCAmelCase__ : List[Any] = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowerCAmelCase__ : Dict = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=10 ,**__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Optional[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length ) lowerCAmelCase__ : Optional[Any] = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) lowerCAmelCase__ : str = max_length - minimum_tokens lowerCAmelCase__ : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: lowerCAmelCase__ : Tuple = model_inputs["""attention_mask"""][:, -trim:] lowerCAmelCase__ : str = model_inputs.pop("""conversation""" ) lowerCAmelCase__ : Union[str, Any] = max_length lowerCAmelCase__ : Any = self.model.generate(**__UpperCAmelCase ,**__UpperCAmelCase ) if self.model.config.is_encoder_decoder: lowerCAmelCase__ : int = 1 else: lowerCAmelCase__ : int = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ) -> List[str]: lowerCAmelCase__ : Optional[int] = model_outputs["""output_ids"""] lowerCAmelCase__ : Tuple = self.tokenizer.decode( output_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,) lowerCAmelCase__ : Union[str, Any] = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Dict = self.tokenizer.eos_token_id lowerCAmelCase__ : int = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: lowerCAmelCase__ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :] return input_ids
37
def a__ ( number : int ):
    # Checks whether the square of ``number`` ends in the digits of ``number`` itself
    # (an automorphic number). The original snippet referenced undefined names
    # (``number`` / ``number_square`` while the parameter was ``_UpperCamelCase``) and
    # checked ``isinstance`` against the value instead of ``int``; fixed here.
    if not isinstance(number, int):
        __lowerCamelCase = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(__lowerCamelCase)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
330
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class _SCREAMING_SNAKE_CASE : snake_case__ : Optional[str] = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) snake_case__ : Optional[str] = field( default=_a , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) snake_case__ : Optional[str] = field( default=_a , metadata={"""help""": """The column name of the images in the files."""} ) snake_case__ : Optional[str] = field(default=_a , metadata={"""help""": """A folder containing the training data."""} ) snake_case__ : Optional[str] = field(default=_a , metadata={"""help""": """A folder containing the validation data."""} ) snake_case__ : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) snake_case__ : Optional[int] = field( default=_a , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) snake_case__ : Optional[int] = field( default=_a , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def _A ( self : Dict ): UpperCamelCase :Dict = {} if self.train_dir is not None: UpperCamelCase :Any = self.train_dir if self.validation_dir is not None: UpperCamelCase :List[Any] = self.validation_dir UpperCamelCase :Optional[Any] = data_files if data_files else None @dataclass class _SCREAMING_SNAKE_CASE : snake_case__ : str = field( default=_a , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) snake_case__ : Optional[str] = field( default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} ) snake_case__ : Optional[str] = field( default=_a , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) snake_case__ : Optional[str] = field( default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) snake_case__ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) snake_case__ : str = field(default=_a , metadata={"""help""": """Name or path of preprocessor config."""} ) snake_case__ : bool = field( default=_a , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) snake_case__ : float = field( default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} ) snake_case__ : bool = field( default=_a , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} ) @dataclass class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : float = field( default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] ) -> Dict: """simple docstring""" UpperCamelCase :Tuple = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def SCREAMING_SNAKE_CASE_ ( ) -> Tuple: """simple docstring""" UpperCamelCase :Any = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase , UpperCamelCase , UpperCamelCase :Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase , UpperCamelCase , UpperCamelCase :str = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __magic_name__ , __magic_name__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase :List[str] = training_args.get_process_log_level() logger.setLevel(__magic_name__ ) transformers.utils.logging.set_verbosity(__magic_name__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCamelCase :List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase :Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCamelCase :Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase :Optional[Any] = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __magic_name__ ) and data_args.train_val_split > 0.0: UpperCamelCase :Any = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCamelCase :Optional[Any] = split["""train"""] UpperCamelCase :int = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase :Tuple = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase :str = ViTMAEConfig.from_pretrained(model_args.config_name , **__magic_name__ ) elif model_args.model_name_or_path: UpperCamelCase :List[Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__magic_name__ ) else: UpperCamelCase :int = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase :Any = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__magic_name__ ) elif model_args.model_name_or_path: UpperCamelCase :List[str] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__magic_name__ ) else: UpperCamelCase :List[str] = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase :List[Any] = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase :str = ViTMAEForPreTraining(__magic_name__ ) if training_args.do_train: UpperCamelCase :int = ds["""train"""].column_names else: UpperCamelCase :List[str] = ds["""validation"""].column_names if 
data_args.image_column_name is not None: UpperCamelCase :List[Any] = data_args.image_column_name elif "image" in column_names: UpperCamelCase :Any = """image""" elif "img" in column_names: UpperCamelCase :Union[str, Any] = """img""" else: UpperCamelCase :Optional[int] = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase :Union[str, Any] = image_processor.size["""shortest_edge"""] else: UpperCamelCase :List[str] = (image_processor.size["""height"""], image_processor.size["""width"""]) UpperCamelCase :Optional[int] = Compose( [ Lambda(lambda __magic_name__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__magic_name__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__magic_name__ : Union[str, Any] ): UpperCamelCase :int = [transforms(__magic_name__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCamelCase :List[str] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__magic_name__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCamelCase :Dict = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__magic_name__ ) # Compute absolute learning rate UpperCamelCase :Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase :Optional[int] = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer UpperCamelCase :str = Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: UpperCamelCase :Optional[int] = None if training_args.resume_from_checkpoint is not None: UpperCamelCase :int = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase :Any = last_checkpoint UpperCamelCase :int = trainer.train(resume_from_checkpoint=__magic_name__ ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase :List[str] = trainer.evaluate() trainer.log_metrics("""eval""" , __magic_name__ ) trainer.save_metrics("""eval""" , __magic_name__ ) # Write model card and (optionally) push to hub UpperCamelCase :Any = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: trainer.push_to_hub(**__magic_name__ ) else: trainer.create_model_card(**__magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> Union[str, Any]: 
"""simple docstring""" main() if __name__ == "__main__": main()
38
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
330
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __lowerCamelCase ( snake_case__): """simple docstring""" UpperCamelCase__ = 42 class __lowerCamelCase ( snake_case__ , snake_case__): """simple docstring""" @register_to_config def __init__( self , UpperCAmelCase = 3 , UpperCAmelCase = 3 , UpperCAmelCase = ("DownEncoderBlock2D",) , UpperCAmelCase = ("UpDecoderBlock2D",) , UpperCAmelCase = (64,) , UpperCAmelCase = 1 , UpperCAmelCase = "silu" , UpperCAmelCase = 3 , UpperCAmelCase = 32 , UpperCAmelCase = 256 , UpperCAmelCase = 32 , UpperCAmelCase = None , UpperCAmelCase = 0.1_82_15 , UpperCAmelCase = "group" , ): """simple docstring""" super().__init__() # pass init params to Encoder _UpperCAmelCase = Encoder( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , down_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , double_z=UpperCAmelCase , ) _UpperCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels _UpperCAmelCase = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 ) _UpperCAmelCase = VectorQuantizer(UpperCAmelCase , UpperCAmelCase , beta=0.25 , remap=UpperCAmelCase , sane_index_shape=UpperCAmelCase ) _UpperCAmelCase = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 ) # pass init params to Decoder _UpperCAmelCase = Decoder( in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , up_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , norm_type=UpperCAmelCase , ) @apply_forward_hook def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = True ): """simple docstring""" _UpperCAmelCase = self.encoder(UpperCAmelCase ) _UpperCAmelCase = self.quant_conv(UpperCAmelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=UpperCAmelCase ) @apply_forward_hook def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ): """simple docstring""" if not force_not_quantize: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.quantize(UpperCAmelCase ) else: _UpperCAmelCase = h _UpperCAmelCase = self.post_quant_conv(UpperCAmelCase ) _UpperCAmelCase = self.decoder(UpperCAmelCase , quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = True ): """simple docstring""" _UpperCAmelCase = sample _UpperCAmelCase = self.encode(UpperCAmelCase ).latents _UpperCAmelCase = self.decode(UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase )
39
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str=7 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Tuple=18 , __UpperCAmelCase : Dict=30 , __UpperCAmelCase : Any=400 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Tuple=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __UpperCAmelCase : Optional[Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __UpperCAmelCase : Union[str, Any]=True , ): a : int = size if size is not None else {"height": 224, "width": 224} a : List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18} a : List[Any] = parent a : Any = batch_size a : str = num_channels a : Optional[int] = image_size a : Tuple = min_resolution a : str = max_resolution a : Dict = do_resize a : Any = size a : Dict = do_center_crop a : List[str] = crop_size a : str = do_normalize a : Optional[int] = image_mean a : Tuple = image_std a : Any = do_convert_rgb def __snake_case ( self : Union[str, Any]): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def __snake_case ( self : Dict , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Any=False): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: a : str = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: a : Tuple = [] for i in range(self.batch_size): a , a : List[str] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension a : List[Any] = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1)) for x in image_inputs] if torchify: a : Optional[Any] = [torch.from_numpy(__UpperCAmelCase) for x in image_inputs] return image_inputs @require_torch @require_vision class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Union[str, Any] = ChineseCLIPImageProcessor if is_vision_available() else None def __snake_case ( self : List[str]): a : Dict = ChineseCLIPImageProcessingTester(self , do_center_crop=__UpperCAmelCase) @property def __snake_case ( self : Union[str, Any]): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self : str): a : Optional[int] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__UpperCAmelCase , "do_resize")) self.assertTrue(hasattr(__UpperCAmelCase , "size")) self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop")) 
self.assertTrue(hasattr(__UpperCAmelCase , "center_crop")) self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize")) self.assertTrue(hasattr(__UpperCAmelCase , "image_mean")) self.assertTrue(hasattr(__UpperCAmelCase , "image_std")) self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb")) def __snake_case ( self : Any): a : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"height": 224, "width": 224}) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18}) a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84}) def __snake_case ( self : str): pass def __snake_case ( self : Tuple): # Initialize image_processing a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images a : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image) # Test not batched input a : str = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : Dict = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __snake_case ( self : List[Any]): # Initialize image_processing a : str = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors a : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray) # Test not batched input a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : Union[str, Any] = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __snake_case ( self : List[str]): # Initialize image_processing a : str = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors a : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor) # Test not batched input a : List[str] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : str = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def __snake_case ( self : Union[str, Any]): a : str = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__UpperCAmelCase) a : Dict = 3 @property def __snake_case ( self : Optional[Any]): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self : Optional[int]): a : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__UpperCAmelCase , "do_resize")) self.assertTrue(hasattr(__UpperCAmelCase , "size")) self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop")) self.assertTrue(hasattr(__UpperCAmelCase , "center_crop")) self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize")) self.assertTrue(hasattr(__UpperCAmelCase , "image_mean")) self.assertTrue(hasattr(__UpperCAmelCase , "image_std")) self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb")) def __snake_case ( self : Any): pass def __snake_case ( self : Union[str, Any]): # Initialize image_processing a : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images a : str = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image) # Test not batched input a : Tuple = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : Optional[Any] = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
40
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
330
0
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> list:
    if len(UpperCamelCase ) <= 1:
        return [tuple(UpperCamelCase )]

    lowerCamelCase__ : Union[str, Any] = []

    def generate(UpperCamelCase , UpperCamelCase ):
        lowerCamelCase__ : List[str] = [0] * n
        res.append(tuple(UpperCamelCase ) )

        lowerCamelCase__ : str = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    lowerCamelCase__ , lowerCamelCase__ : Dict = arr[i], arr[0]
                else:
                    lowerCamelCase__ , lowerCamelCase__ : Dict = arr[i], arr[c[i]]
                res.append(tuple(UpperCamelCase ) )
                c[i] += 1
                lowerCamelCase__ : Optional[int] = 0
            else:
                lowerCamelCase__ : Dict = 0
                i += 1

    generate(len(UpperCamelCase ) , UpperCamelCase )
    return res


if __name__ == "__main__":
    _A : str = input('''Enter numbers separated by a comma:\n''').strip()
    _A : Optional[int] = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
41
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
330
0
'''simple docstring'''
from __future__ import annotations


def SCREAMING_SNAKE_CASE__ ( __A , __A = None , __A = None ) -> None:
    if start is None:
        _snake_case = 0
    if end is None:
        _snake_case = len(__A ) - 1
    if start >= end:
        return
    _snake_case = (start + end) // 2

    slowsort(__A , __A , __A )
    slowsort(__A , mid + 1 , __A )

    if sequence[end] < sequence[mid]:
        _snake_case , _snake_case = sequence[mid], sequence[end]

    slowsort(__A , __A , end - 1 )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
42
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
330
0
import random


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    __UpperCamelCase :Optional[Any] = a[left_index]
    __UpperCamelCase :Any = left_index + 1
    for j in range(left_index + 1 , SCREAMING_SNAKE_CASE ):
        if a[j] < pivot:
            __UpperCamelCase , __UpperCamelCase :str = a[i], a[j]
            i += 1
    __UpperCamelCase , __UpperCamelCase :Optional[int] = a[i - 1], a[left_index]
    return i - 1


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    if left < right:
        __UpperCamelCase :int = random.randint(SCREAMING_SNAKE_CASE , right - 1 )
        __UpperCamelCase , __UpperCamelCase :List[str] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        __UpperCamelCase :Dict = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        quick_sort_random(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            SCREAMING_SNAKE_CASE , pivot_index + 1 , SCREAMING_SNAKE_CASE
        )  # recursive quicksort to the right of the pivot point


def lowerCamelCase ( ):
    '''simple docstring'''
    __UpperCamelCase :Tuple = input('''Enter numbers separated by a comma:\n''' ).strip()
    __UpperCamelCase :Union[str, Any] = [int(SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )]
    quick_sort_random(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) )
    print(SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    main()
43
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
330
0
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __A ( unittest.TestCase ): def __A ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __A ( self ): _lowerCAmelCase : List[str] = 1 _lowerCAmelCase : Tuple = 3 _lowerCAmelCase : Optional[int] = (32, 32) _lowerCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ ) return image @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) return model @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(a__ ) @property def __A ( self ): def extract(*a__ , **a__ ): class __A : def __init__( self ): _lowerCAmelCase : Dict = torch.ones([0] ) def __A ( self , a__ ): self.pixel_values.to(a__ ) return self return Out() return extract def __A ( self ): _lowerCAmelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase : Optional[Any] = self.dummy_cond_unet _lowerCAmelCase : Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , ) _lowerCAmelCase : str = self.dummy_vae _lowerCAmelCase : Tuple = self.dummy_text_encoder _lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk _lowerCAmelCase : Optional[int] = StableDiffusionPipeline( unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , ) _lowerCAmelCase : int = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : Optional[Any] = """A painting of a squirrel eating a burger""" _lowerCAmelCase : List[Any] = torch.Generator(device=a__ ).manual_seed(0 ) _lowerCAmelCase : Union[str, Any] = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) _lowerCAmelCase : Any = output.images _lowerCAmelCase : str = torch.Generator(device=a__ ).manual_seed(0 ) _lowerCAmelCase : int = sd_pipe( [prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=a__ , 
)[0] _lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] _lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : Dict = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self ): _lowerCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase : int = self.dummy_cond_unet _lowerCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a__ ) _lowerCAmelCase : List[str] = self.dummy_vae _lowerCAmelCase : Any = self.dummy_text_encoder _lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk _lowerCAmelCase : str = StableDiffusionPipeline( unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , ) _lowerCAmelCase : Optional[Any] = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : str = """A painting of a squirrel eating a burger""" _lowerCAmelCase : str = torch.Generator(device=a__ ).manual_seed(0 ) _lowerCAmelCase : Tuple = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" ) _lowerCAmelCase : Tuple = output.images _lowerCAmelCase : Dict = torch.Generator(device=a__ ).manual_seed(0 ) _lowerCAmelCase : List[str] = sd_pipe( [prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=a__ , )[0] _lowerCAmelCase : Any = image[0, -3:, -3:, -1] _lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : Union[str, Any] = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self ): _lowerCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=a__ ) assert isinstance(a__ , a__ ) assert isinstance(pipe.scheduler , a__ ) assert pipe.safety_checker is None _lowerCAmelCase : Any = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a__ ) _lowerCAmelCase : str = StableDiffusionPipeline.from_pretrained(a__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None _lowerCAmelCase : Optional[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def __A ( self ): _lowerCAmelCase : int = self.dummy_cond_unet _lowerCAmelCase : str = PNDMScheduler(skip_prk_steps=a__ ) _lowerCAmelCase : Any = self.dummy_vae _lowerCAmelCase : Dict = self.dummy_text_encoder _lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 _lowerCAmelCase : str = unet.half() _lowerCAmelCase : List[str] = vae.half() _lowerCAmelCase : Tuple = bert.half() # make sure here that pndm scheduler 
skips prk _lowerCAmelCase : Dict = StableDiffusionPipeline( unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , ) _lowerCAmelCase : int = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : int = """A painting of a squirrel eating a burger""" _lowerCAmelCase : Dict = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __A ( unittest.TestCase ): def __A ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self ): _lowerCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=a__ ) _lowerCAmelCase : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _lowerCAmelCase : Optional[int] = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : Dict = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) _lowerCAmelCase : List[Any] = 4003660346 _lowerCAmelCase : List[Any] = 7 # without safety guidance (sld_guidance_scale = 0) _lowerCAmelCase : int = torch.manual_seed(a__ ) _lowerCAmelCase : Dict = sd_pipe( [prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , ) _lowerCAmelCase : int = output.images _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) _lowerCAmelCase : Optional[int] = torch.manual_seed(a__ ) _lowerCAmelCase : List[str] = sd_pipe( [prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _lowerCAmelCase : Optional[int] = output.images _lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] _lowerCAmelCase : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self ): _lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=a__ ) _lowerCAmelCase : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _lowerCAmelCase : Optional[Any] = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : List[str] = """padme amidala taking a bath artwork, safe for work, no nudity""" _lowerCAmelCase : Tuple = 2734971755 _lowerCAmelCase : Union[str, Any] = 7 _lowerCAmelCase : Optional[int] = torch.manual_seed(a__ ) _lowerCAmelCase : Any = sd_pipe( [prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , ) _lowerCAmelCase : int = 
output.images _lowerCAmelCase : int = image[0, -3:, -3:, -1] _lowerCAmelCase : Dict = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 _lowerCAmelCase : Optional[Any] = torch.manual_seed(a__ ) _lowerCAmelCase : Any = sd_pipe( [prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _lowerCAmelCase : List[Any] = output.images _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self ): _lowerCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) _lowerCAmelCase : Any = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : Dict = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.""" """ leyendecker""" ) _lowerCAmelCase : int = 1044355234 _lowerCAmelCase : Tuple = 12 _lowerCAmelCase : int = torch.manual_seed(a__ ) _lowerCAmelCase : str = sd_pipe( [prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , ) _lowerCAmelCase : List[str] = output.images _lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] _lowerCAmelCase : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 _lowerCAmelCase : Optional[int] = torch.manual_seed(a__ ) _lowerCAmelCase : List[Any] = sd_pipe( [prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _lowerCAmelCase : List[str] = output.images _lowerCAmelCase : Any = image[0, -3:, -3:, -1] _lowerCAmelCase : List[str] = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
44
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
330
0
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowercase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowercase ( lowerCAmelCase__ : Optional[int] ) -> Optional[Any]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str ) -> List[str]: return max(metric_fn(lowerCAmelCase__ , lowerCAmelCase__ ) for gt in ground_truths ) def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ) -> int: __a = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a = [] if args.gold_data_mode == "qa": __a = pd.read_csv(lowerCAmelCase__ , sep='''\t''' , header=lowerCAmelCase__ ) for answer_list in data[1]: __a = ast.literal_eval(lowerCAmelCase__ ) answers.append(lowerCAmelCase__ ) else: __a = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a = [[reference] for reference in references] __a = __a = __a = 0 for prediction, ground_truths in zip(lowerCAmelCase__ , lowerCAmelCase__ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) fa += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __a = 1_00.0 * em / total __a = 1_00.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ) -> Any: __a = args.k __a = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a = __a = 0 for hypo, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __a = set(hypo.split('''\t''' )[:k] ) __a = set(reference.split('''\t''' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __a = 1_00.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Dict: def strip_title(lowerCAmelCase__ : Tuple ): if title.startswith('''"''' ): __a = title[1:] if title.endswith('''"''' ): __a = title[:-1] return title __a = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors='''pt''' , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , )['''input_ids'''].to(args.device ) __a = rag_model.rag.question_encoder(lowerCAmelCase__ ) __a = question_enc_outputs[0] __a = rag_model.retriever( lowerCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , ) __a = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __a = [] for docs in all_docs: __a = [strip_title(lowerCAmelCase__ ) for title in docs['''title''']] provenance_strings.append('''\t'''.join(lowerCAmelCase__ ) ) return 
provenance_strings def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any ) -> Any: with torch.no_grad(): __a = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors='''pt''' , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __a = inputs_dict.input_ids.to(args.device ) __a = inputs_dict.attention_mask.to(args.device ) __a = rag_model.generate( # rag_model overwrites generate lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __a = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) if args.print_predictions: for q, a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info('''Q: {} - A: {}'''.format(lowerCAmelCase__ , lowerCAmelCase__ ) ) return answers def lowercase ( ) -> Optional[Any]: __a = argparse.ArgumentParser() parser.add_argument( '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=lowerCAmelCase__ , help=( '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the''' ''' model_name_or_path''' ) , ) parser.add_argument( '''--index_name''' , default=lowerCAmelCase__ , choices=['''exact''', '''compressed''', '''legacy'''] , type=lowerCAmelCase__ , help='''RAG model retriever type''' , ) parser.add_argument( '''--index_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Path to the retrieval index''' , ) parser.add_argument('''--n_docs''' , default=5 , type=lowerCAmelCase__ , help='''Number of retrieved docs''' ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=lowerCAmelCase__ , help=( '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates''' ''' precision@k.''' ) , ) parser.add_argument('''--k''' , default=1 , type=lowerCAmelCase__ , help='''k for the precision@k calculation''' ) parser.add_argument( '''--evaluation_set''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to a file containing evaluation samples''' , ) parser.add_argument( '''--gold_data_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to a tab-separated file with gold samples''' , ) parser.add_argument( '''--gold_data_mode''' , default='''qa''' , type=lowerCAmelCase__ , choices=['''qa''', '''ans'''] , help=( '''Format of the gold data file''' '''qa - a single line in the following format: question [tab] answer_list''' '''ans - a single line of the gold file contains the expected answer string''' ) , ) parser.add_argument( '''--predictions_path''' , type=lowerCAmelCase__ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , ) parser.add_argument( '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , ) parser.add_argument( '''--eval_batch_size''' , default=8 , type=lowerCAmelCase__ , help='''Batch size 
per GPU/CPU for evaluation.''' , ) parser.add_argument( '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , ) parser.add_argument( '''--num_beams''' , default=4 , type=lowerCAmelCase__ , help='''Number of beams to be used when generating answers''' , ) parser.add_argument('''--min_length''' , default=1 , type=lowerCAmelCase__ , help='''Min length of the generated answers''' ) parser.add_argument('''--max_length''' , default=50 , type=lowerCAmelCase__ , help='''Max length of the generated answers''' ) parser.add_argument( '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , ) parser.add_argument( '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , ) __a = parser.parse_args() __a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) return args def lowercase ( lowerCAmelCase__ : Union[str, Any] ) -> str: __a = {} if args.model_type is None: __a = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('''rag''' ): __a = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration __a = args.n_docs if args.index_name is not None: __a = args.index_name if args.index_path is not None: __a = args.index_path else: __a = BartForConditionalGeneration __a = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('''Evaluate the following checkpoints: %s''' , lowerCAmelCase__ ) __a = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k __a = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) ) score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) continue logger.info('''***** Running evaluation for {} *****'''.format(lowerCAmelCase__ ) ) logger.info(''' Batch size = %d''' , args.eval_batch_size ) logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) ) if args.model_type.startswith('''rag''' ): __a = RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) __a = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ ) model.retriever.init_retrieval() else: __a = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) model.to(args.device ) with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file: __a = [] for line in tqdm(lowerCAmelCase__ ): questions.append(line.strip() ) if len(lowerCAmelCase__ ) == args.eval_batch_size: __a = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write('''\n'''.join(lowerCAmelCase__ ) + '''\n''' ) preds_file.flush() __a = [] if len(lowerCAmelCase__ ) > 0: __a = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write('''\n'''.join(lowerCAmelCase__ ) ) preds_file.flush() score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowercase_ = get_args() main(args)
45
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
0
"""simple docstring""" from manim import * class lowercase ( _UpperCAmelCase ): def _snake_case ( self ) -> Tuple: lowerCAmelCase = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase = [mem.copy() for i in range(6 )] lowerCAmelCase = [mem.copy() for i in range(6 )] lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 ) lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 ) lowerCAmelCase = VGroup(lowercase , lowercase ).arrange(lowercase , buff=0 ) lowerCAmelCase = Text("""CPU""" , font_size=24 ) lowerCAmelCase = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase ) lowerCAmelCase = [mem.copy() for i in range(1 )] lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 ) lowerCAmelCase = Text("""GPU""" , font_size=24 ) lowerCAmelCase = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) gpu.align_to(lowercase , lowercase ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowercase ) lowerCAmelCase = [mem.copy() for i in range(6 )] lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 ) lowerCAmelCase = Text("""Model""" , font_size=24 ) lowerCAmelCase = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) model.move_to([3, -1.0, 0] ) self.play( Create(lowercase , run_time=1 ) , Create(lowercase , run_time=1 ) , Create(lowercase , run_time=1 ) , ) lowerCAmelCase = MarkupText( f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , ) lowerCAmelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase , run_time=2.5 ) , Write(lowercase ) , Write(lowercase ) ) self.add(lowercase ) lowerCAmelCase = [] lowerCAmelCase = [] lowerCAmelCase = [] for i, rect in enumerate(lowercase ): lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowercase , opacity=0.7 ) cpu_target.move_to(lowercase ) cpu_target.generate_target() lowerCAmelCase = 0.46 / 4 lowerCAmelCase = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase , buff=0.0 ) cpu_targs.append(lowercase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowercase ) ) second_animations.append(MoveToTarget(lowercase , run_time=1.5 ) ) self.play(*lowercase ) self.play(*lowercase ) self.wait()
46
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # keep the original (width, height) so the prediction can be resized back in postprocess
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # PIL sizes are (width, height); interpolate expects (height, width), hence the reversal
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
330
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A__ ( unittest.TestCase ): def __init__( self : List[str] , _a : List[Any] , _a : List[str]=7 , _a : Optional[int]=3 , _a : Tuple=10 , _a : Optional[Any]=18 , _a : int=30 , _a : Optional[int]=400 , _a : Dict=True , _a : Union[str, Any]=None , _a : List[Any]=True , _a : str=[0.5, 0.5, 0.5] , _a : List[Any]=[0.5, 0.5, 0.5] , _a : str=None , ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =size if size is not None else {'shortest_edge': 18} _SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'height': 18, 'width': 18} _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =num_frames _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =min_resolution _SCREAMING_SNAKE_CASE =max_resolution _SCREAMING_SNAKE_CASE =do_resize _SCREAMING_SNAKE_CASE =size _SCREAMING_SNAKE_CASE =do_normalize _SCREAMING_SNAKE_CASE =image_mean _SCREAMING_SNAKE_CASE =image_std _SCREAMING_SNAKE_CASE =crop_size def A ( self : int ) -> Dict: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): A__ = VivitImageProcessor if is_vision_available() else None def A ( self : int ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =VivitImageProcessingTester(self ) @property def A ( self : List[Any] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , 'image_mean' ) ) self.assertTrue(hasattr(_a , 'image_std' ) ) self.assertTrue(hasattr(_a , 'do_normalize' ) ) self.assertTrue(hasattr(_a , 'do_resize' ) ) self.assertTrue(hasattr(_a , 'do_center_crop' ) ) self.assertTrue(hasattr(_a , 'size' ) ) def A ( self : str ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) _SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def A ( self : Any ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _SCREAMING_SNAKE_CASE =prepare_video_inputs(self.image_processor_tester , equal_resolution=_a ) for video in video_inputs: self.assertIsInstance(_a , _a ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE =image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, 
self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def A ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _SCREAMING_SNAKE_CASE =prepare_video_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for video in video_inputs: self.assertIsInstance(_a , _a ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _SCREAMING_SNAKE_CASE =image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def A ( self : Optional[int] ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _SCREAMING_SNAKE_CASE =prepare_video_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for video in video_inputs: self.assertIsInstance(_a , _a ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _SCREAMING_SNAKE_CASE =image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
47
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
0
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def A ( _SCREAMING_SNAKE_CASE ) -> Dict: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class UpperCamelCase__ (nn.Module ): '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any: super().__init__() lowerCamelCase : int = module lowerCamelCase : str = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__ ) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__ ) , ) lowerCamelCase : Any = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _lowercase ( self , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> int: return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) + self.adapter(UpperCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """bigscience/bloom-1b7""" # Constant values lowerCamelCase_ : int = 2.1_09_65_95_52_69_25_74 lowerCamelCase_ : Union[str, Any] = """Hello my name is""" lowerCamelCase_ : Dict = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCamelCase_ : Optional[Any] = 1_0 def _lowercase ( self ) -> List[str]: # Models and tokenizer lowerCamelCase : str = AutoTokenizer.from_pretrained(self.model_name ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def _lowercase ( self ) -> Tuple: super().setUp() # Models and tokenizer lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) def _lowercase ( self ) -> Dict: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : int = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase__ , "quantization_config" ) ) lowerCamelCase : Optional[Any] = config.to_dict() lowerCamelCase : Tuple = config.to_diff_dict() lowerCamelCase : Tuple = config.to_json_string() def _lowercase ( self ) -> Any: from bitsandbytes.nn import Paramsabit lowerCamelCase : Union[str, Any] = self.model_fpaa.get_memory_footprint() lowerCamelCase : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) lowerCamelCase : Tuple = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _lowercase ( self ) -> List[Any]: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(UpperCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : Optional[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) lowerCamelCase : int = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _lowercase ( self ) -> Any: lowerCamelCase : Dict = BitsAndBytesConfig() lowerCamelCase : Dict = True lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , device_map="auto" ) lowerCamelCase : Any = self.tokenizer(self.input_text , return_tensors="pt" ) lowerCamelCase : Optional[int] = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _lowercase ( self ) -> str: with self.assertRaises(UpperCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase__ ) def _lowercase ( self ) -> Any: lowerCamelCase : Dict = BitsAndBytesConfig() with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : Any = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _lowercase ( self ) -> Optional[int]: with self.assertRaises(UpperCamelCase__ ): # Tries 
with `str` self.model_abit.to("cpu" ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowerCamelCase : Tuple = self.tokenizer(self.input_text , return_tensors="pt" ) lowerCamelCase : Optional[int] = self.model_fpaa.to(torch.floataa ) lowerCamelCase : Dict = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error lowerCamelCase : Union[str, Any] = self.model_fpaa.to("cpu" ) # Check this does not throw an error lowerCamelCase : List[Any] = self.model_fpaa.half() # Check this does not throw an error lowerCamelCase : Optional[int] = self.model_fpaa.float() def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=UpperCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @classmethod def _lowercase ( cls ) -> Union[str, Any]: lowerCamelCase : str = "t5-small" lowerCamelCase : Optional[int] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense lowerCamelCase : int = AutoTokenizer.from_pretrained(cls.model_name ) lowerCamelCase : Tuple = "Translate in German: Hello, my dog is cute" def _lowercase ( self ) -> List[Any]: gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Optional[int]: from transformers import TaForConditionalGeneration lowerCamelCase : int = TaForConditionalGeneration._keep_in_fpaa_modules lowerCamelCase : List[Any] = None # test with `t5-small` lowerCamelCase : Union[str, Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) lowerCamelCase : str = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) lowerCamelCase : List[Any] = model.generate(**UpperCamelCase__ ) # test with `flan-t5-small` lowerCamelCase : str = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) lowerCamelCase : str = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) lowerCamelCase : Union[str, Any] = model.generate(**UpperCamelCase__ ) lowerCamelCase : int = modules def _lowercase ( self ) -> int: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowerCamelCase : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) lowerCamelCase : str = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) lowerCamelCase : Union[str, Any] = model.generate(**UpperCamelCase__ ) # test with `flan-t5-small` lowerCamelCase : List[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) lowerCamelCase : str = 
self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) lowerCamelCase : Any = model.generate(**UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def _lowercase ( self ) -> Optional[Any]: super().setUp() # model_name lowerCamelCase : Tuple = "bigscience/bloom-560m" lowerCamelCase : List[Any] = "t5-small" # Different types of model lowerCamelCase : Dict = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) # Sequence classification model lowerCamelCase : Any = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) # CausalLM model lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) # Seq2seq model lowerCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="auto" ) def _lowercase ( self ) -> List[str]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> int: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def _lowercase ( self ) -> List[str]: super().setUp() def _lowercase ( self ) -> str: del self.pipe gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> List[str]: lowerCamelCase : Any = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass lowerCamelCase : List[Any] = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def _lowercase ( self ) -> Optional[Any]: super().setUp() def _lowercase ( self ) -> List[Any]: lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model lowerCamelCase : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch lowerCamelCase : Any = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Dict = "facebook/opt-350m" super().setUp() def _lowercase ( self ) -> Dict: if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , 
{torch.cuda.current_device()} ) for param in model.parameters(): lowerCamelCase : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability lowerCamelCase : Optional[Any] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase__ ) ): lowerCamelCase : Dict = LoRALayer(module.q_proj , rank=16 ) lowerCamelCase : Any = LoRALayer(module.k_proj , rank=16 ) lowerCamelCase : Tuple = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch lowerCamelCase : int = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowerCamelCase : Tuple = model.forward(**UpperCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(UpperCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : str = """gpt2-xl""" lowerCamelCase_ : int = 3.31_91_85_48_54_15_21_87
48
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for the union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # build a minimum spanning tree with Kruskal's algorithm
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
330
0
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
49
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer _UpperCAmelCase : Optional[int] = logging.get_logger(__name__) _UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _UpperCAmelCase : int = { """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } _UpperCAmelCase : int = { """google/realm-cc-news-pretrained-embedder""": 5_12, """google/realm-cc-news-pretrained-encoder""": 5_12, """google/realm-cc-news-pretrained-scorer""": 5_12, """google/realm-cc-news-pretrained-openqa""": 5_12, """google/realm-orqa-nq-openqa""": 5_12, """google/realm-orqa-nq-reader""": 5_12, """google/realm-orqa-wq-openqa""": 5_12, """google/realm-orqa-wq-reader""": 5_12, } _UpperCAmelCase : Any = { """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, 
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": {"""do_lower_case""": True}, } class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = RealmTokenizer def __init__( self : Optional[int] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]="[UNK]" , UpperCAmelCase : Any="[SEP]" , UpperCAmelCase : Tuple="[PAD]" , UpperCAmelCase : List[Any]="[CLS]" , UpperCAmelCase : Union[str, Any]="[MASK]" , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Optional[int] , ) -> str: super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) lowerCamelCase__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , UpperCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , UpperCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase__ : Optional[int] = getattr(UpperCAmelCase , normalizer_state.pop('type' ) ) lowerCamelCase__ : Optional[Any] = do_lower_case lowerCamelCase__ : str = strip_accents lowerCamelCase__ : Optional[Any] = tokenize_chinese_chars lowerCamelCase__ : int = normalizer_class(**UpperCAmelCase ) lowerCamelCase__ : str = do_lower_case def A_ ( self : Optional[int] , UpperCAmelCase : int , **UpperCAmelCase : int ) -> List[Any]: lowerCamelCase__ : List[Any] = PaddingStrategy.MAX_LENGTH lowerCamelCase__ : Optional[int] = text lowerCamelCase__ : Dict = kwargs.pop('text_pair' , UpperCAmelCase ) lowerCamelCase__ : List[Any] = kwargs.pop('return_tensors' , UpperCAmelCase ) lowerCamelCase__ : List[Any] = { 'input_ids': [], 'attention_mask': [], 'token_type_ids': [], } for idx, candidate_text in enumerate(UpperCAmelCase ): if batch_text_pair is not None: lowerCamelCase__ : Tuple = batch_text_pair[idx] else: lowerCamelCase__ : Dict = None lowerCamelCase__ : Optional[int] = super().__call__(UpperCAmelCase , UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) lowerCamelCase__ : Any = encoded_candidates.get('input_ids' ) lowerCamelCase__ : Union[str, Any] = encoded_candidates.get('attention_mask' ) lowerCamelCase__ : Tuple = encoded_candidates.get('token_type_ids' ) if encoded_input_ids is not None: output_data["input_ids"].append(UpperCAmelCase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(UpperCAmelCase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(UpperCAmelCase ) lowerCamelCase__ : int = {key: item for key, item in output_data.items() if len(UpperCAmelCase ) != 0} return BatchEncoding(UpperCAmelCase , tensor_type=UpperCAmelCase ) def A_ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None ) -> List[str]: lowerCamelCase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : 
Optional[List[int]] = None ) -> List[int]: lowerCamelCase__ : List[Any] = [self.sep_token_id] lowerCamelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: lowerCamelCase__ : int = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
50
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Register the lazily-resolved module so framework-specific classes are only imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
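# A minimal usage sketch for the lazy import structure above (not part of the original
# module): `EncoderDecoderConfig` is always importable, while the framework-specific model
# classes only resolve when the matching backend (PyTorch, TensorFlow or Flax) is installed.
# The classes below are assumed to be re-exported from the top-level `transformers` namespace.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig(is_decoder=True, add_cross_attention=True)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(type(config).__name__)  # EncoderDecoderConfig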
330
0
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
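# A short, hypothetical usage sketch for the reader above, assuming it is exposed as
# `TextDatasetReader` in `datasets.io.text` (the file path below is a placeholder).
# The higher-level `datasets.Dataset.from_text(...)` wraps this same reader.
from datasets.io.text import TextDatasetReader

dataset = TextDatasetReader("my_corpus.txt", keep_in_memory=True).read()
print(dataset)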
51
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
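# A few illustrative checks for the helper above (example strings chosen here, not taken
# from the original file):
assert capitalize("hello world") == "Hello world"
assert capitalize("123 hello world") == "123 hello world"
assert capitalize("") == ""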
330
0
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class A__ ( __snake_case ): def __init__( self , *A_ , A_=None , A_=None , **A_ ): '''simple docstring''' super().__init__(*A_ , **A_ ) UpperCamelCase : str = eval_examples UpperCamelCase : int = post_process_function def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_ = "eval" ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.eval_dataset if eval_dataset is None else eval_dataset UpperCamelCase : str = self.get_eval_dataloader(A_ ) UpperCamelCase : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCamelCase : Dict = self.compute_metrics UpperCamelCase : Dict = None UpperCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : List[str] = time.time() try: UpperCamelCase : Optional[int] = eval_loop( A_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , ) finally: UpperCamelCase : Optional[Any] = compute_metrics UpperCamelCase : Tuple = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCamelCase : Tuple = self.post_process_function(A_ , A_ , output.predictions ) UpperCamelCase : Tuple = self.compute_metrics(A_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCamelCase : Tuple = metrics.pop(A_ ) metrics.update(output.metrics ) else: UpperCamelCase : Union[str, Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(A_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCamelCase : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_ ) return metrics def __UpperCamelCase( self , A_ , A_ , A_=None , A_ = "test" ): '''simple docstring''' UpperCamelCase : Any = self.get_test_dataloader(A_ ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCamelCase : Any = self.compute_metrics UpperCamelCase : List[Any] = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Tuple = time.time() try: UpperCamelCase : List[Any] = eval_loop( A_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , ) finally: UpperCamelCase : List[str] = compute_metrics UpperCamelCase : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCamelCase : List[str] = self.post_process_function(A_ , A_ , output.predictions , "predict" ) UpperCamelCase : List[str] = self.compute_metrics(A_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): UpperCamelCase : str = metrics.pop(A_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_ )
52
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
330
0
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
53
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Register the lazily-resolved module so TimmBackbone is only imported when first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
330
0
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) for i in range(n - 1 ): for j in range(i + 1 , lowerCAmelCase_ ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if len(lowerCAmelCase_ ) <= 1: return arr, 0 __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) // 2 __SCREAMING_SNAKE_CASE = arr[0:mid] __SCREAMING_SNAKE_CASE = arr[mid:] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _count_cross_inversions(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = inversion_p + inversions_q + cross_inversions return c, num_inversions def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = 0 while i < len(lowerCAmelCase_ ) and j < len(lowerCAmelCase_ ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(lowerCAmelCase_ ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(lowerCAmelCase_ ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __SCREAMING_SNAKE_CASE = count_inversions_bf(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ ) assert num_inversions_bf == num_inversions_recursive == 8 print("number of inversions = " , lowerCAmelCase_ ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __SCREAMING_SNAKE_CASE = count_inversions_bf(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ ) assert num_inversions_bf == num_inversions_recursive == 0 print("number of inversions = " , lowerCAmelCase_ ) # an empty list should also have zero inversions __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = count_inversions_bf(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ ) assert num_inversions_bf == num_inversions_recursive == 0 print("number of inversions = " , lowerCAmelCase_ ) if __name__ == "__main__": main()
54
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
330
0
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def __snake_case ( UpperCAmelCase_ : str="" ): lowerCamelCase_ = tempfile.mkdtemp() return os.path.join(UpperCAmelCase_ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch.rand(12 , dtype=torch.floataa ) - 0.5 lowerCamelCase_ = AgentAudio(UpperCamelCase ) lowerCamelCase_ = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(UpperCamelCase , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(UpperCamelCase ) ) # Ensure that the file contains the same value as the original tensor lowerCamelCase_ ,lowerCamelCase_ = sf.read(UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , torch.tensor(UpperCamelCase ) , atol=1e-4 ) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch.rand(12 , dtype=torch.floataa ) - 0.5 lowerCamelCase_ = get_new_path(suffix=".wav" ) sf.write(UpperCamelCase , UpperCamelCase , 1_6000 ) lowerCamelCase_ = AgentAudio(UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , UpperCamelCase ) @require_vision @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch.randint(0 , 256 , (64, 64, 3) ) lowerCamelCase_ = AgentImage(UpperCamelCase ) lowerCamelCase_ = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(UpperCamelCase , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(UpperCamelCase ) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" lowerCamelCase_ = Image.open(UpperCamelCase ) lowerCamelCase_ = AgentImage(UpperCamelCase ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(UpperCamelCase ) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" lowerCamelCase_ = Image.open(UpperCamelCase ) lowerCamelCase_ = AgentImage(UpperCamelCase ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(UpperCamelCase ) ) class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "Hey!" 
lowerCamelCase_ = AgentText(UpperCamelCase ) self.assertEqual(UpperCamelCase , agent_type.to_string() ) self.assertEqual(UpperCamelCase , agent_type.to_raw() ) self.assertEqual(UpperCamelCase , UpperCamelCase )
55
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube via a floating-point cube root."""
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
330
0
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as the spare."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move."""
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
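# A small companion check (a sketch, not part of the original file): the recursion above
# performs exactly 2**height - 1 disk moves. The helper below re-counts the moves instead
# of printing them.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    # two recursive sub-towers plus one direct move of the largest disk
    return 2 * count_moves(height - 1) + 1


assert all(count_moves(h) == 2**h - 1 for h in range(1, 11))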
56
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
330
0
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise ValueError("iterations must be defined as integers" ) if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) __lowerCAmelCase = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(_UpperCamelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
57
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
330
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class a_ : '''simple docstring''' def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> int: _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_mask _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = scope def snake_case_( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_( self ) -> Any: return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , ) def snake_case_( self , A , A , A , A , A , A , A ) -> str: _SCREAMING_SNAKE_CASE = NystromformerModel(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A ) _SCREAMING_SNAKE_CASE 
= model(A , token_type_ids=A ) _SCREAMING_SNAKE_CASE = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_( self , A , A , A , A , A , A , A ) -> str: _SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_( self , A , A , A , A , A , A , A ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model( A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_( self , A , A , A , A , A , A , A ) -> Any: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_( self , A , A , A , A , A , A , A ) -> str: _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case_( self , A , A , A , A , A , A , A ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=A ) model.to(A ) model.eval() _SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = model( A , attention_mask=A , token_type_ids=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a_ ( snake_case_ , snake_case_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) UpperCamelCase = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if 
is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = NystromformerModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A , hidden_size=37 ) def snake_case_( self ) -> int: self.config_tester.run_common_tests() def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*A ) def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A ) def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A ) def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def snake_case_( self ) -> Dict: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @slow def snake_case_( self ) -> int: for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class a_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) _SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(A )[0] _SCREAMING_SNAKE_CASE = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , A ) _SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) ) @slow def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = """the [MASK] of Belgium is Brussels""" _SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) _SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) _SCREAMING_SNAKE_CASE = tokenizer(A , return_tensors="""pt""" ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits _SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(A ) , """capital""" )
58
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) # verify the logits shape __lowerCamelCase = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowerCAmelCase__ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerModelTester(self ) __lowerCamelCase = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = copy.deepcopy(__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = 
TimesformerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' if not self.has_attentions: pass else: __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = True for model_class in self.all_model_classes: __lowerCamelCase = self.model_tester.seq_length __lowerCamelCase = self.model_tester.num_frames __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __lowerCamelCase = len(__UpperCAmelCase ) # Check attention is always last and order is fine __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) ) __lowerCamelCase = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) __lowerCamelCase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def a__ ( ): __lowerCamelCase = hf_hub_download( 
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) __lowerCamelCase = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __UpperCAmelCase ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_video() __lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
import re from filelock import FileLock try: import nltk NLTK_AVAILABLE = True except (ImportError, ModuleNotFoundError): NLTK_AVAILABLE = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def UpperCamelCase ( __lowerCamelCase : str ): __lowerCamelCase = re.sub("<n>" , "" , __lowerCamelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
59
def a__ ( number : int ): if not isinstance(number , int ): msg = F"""Input value of [number={number}] must be an integer""" raise TypeError(msg ) if number < 0: return False number_square = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
330
0
"""simple docstring""" def _snake_case ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ): lowerCAmelCase : List[str] = set() # Replace all the whitespace in our sentence lowerCAmelCase : List[Any] = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(_snake_case ) == 26 def _snake_case ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ): lowerCAmelCase : Tuple = [False] * 26 for char in input_str: if char.islower(): lowerCAmelCase : int = True elif char.isupper(): lowerCAmelCase : Optional[Any] = True return all(_snake_case ) def _snake_case ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ): return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _snake_case ( ): from timeit import timeit lowerCAmelCase : Optional[Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=_snake_case ) ) print(timeit('''is_pangram_faster()''' , setup=_snake_case ) ) print(timeit('''is_pangram_fastest()''' , setup=_snake_case ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
60
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase ) for s in shape] )}.npy""" def lowerCamelCase ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return image def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = '''bf16''' if fpaa else None __lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase ) return model, params def lowerCamelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 768) , __UpperCAmelCase=False ): '''simple docstring''' __lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCamelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase ) ) , dtype=__UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def 
lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase ) __lowerCamelCase = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 1024) , fpaa=__UpperCAmelCase ) __lowerCamelCase = model.apply( {'''params''': params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCAmelCase , ).sample assert sample.shape == latents.shape __lowerCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCamelCase = jnp.array(__UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
330
0
"""simple docstring""" def __a ( __lowerCamelCase ): UpperCAmelCase_ : Tuple = len(__lowerCamelCase ) for _ in range(__lowerCamelCase ): for i in range(_ % 2, arr_size - 1, 2 ): if arr[i + 1] < arr[i]: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = arr[i + 1], arr[i] return arr if __name__ == "__main__": _a = list(range(10, 0, -1)) print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
61
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
from heapq import heappop, heappush import numpy as np def _UpperCAmelCase ( grid : np.ndarray , source : tuple[int, int] , destination : tuple[int, int] , allow_diagonal : bool , ): rows , cols = grid.shape dx = [-1, 1, 0, 0] dy = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] queue , visited = [(0, source)], set() matrix = np.full((rows, cols) , np.inf ) matrix[source] = 0 predecessors = np.empty((rows, cols) , dtype=object ) predecessors[source] = None while queue: dist , (x, y) = heappop(queue ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: path = [] while (x, y) != source: path.append((x, y) ) x , y = predecessors[x, y] path.append(source ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(dx ) ): nx , ny = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: next_node = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(queue , (dist + 1, (nx, ny)) ) matrix[nx, ny] = dist + 1 predecessors[nx, ny] = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
62
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a__ ( _UpperCamelCase : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase ( nn.Module ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() __lowerCamelCase = module __lowerCamelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , ) __lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74 lowerCAmelCase__ = """Hello my name is""" lowerCAmelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ = 1_0 def lowerCamelCase ( self ): '''simple docstring''' # Models and tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) ) __lowerCamelCase = config.to_dict() __lowerCamelCase = config.to_diff_dict() __lowerCamelCase = config.to_json_string() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCamelCase = self.model_fpaa.get_memory_footprint() __lowerCamelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCamelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() __lowerCamelCase = True __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def lowerCamelCase ( self ): '''simple docstring''' with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCAmelCase ): __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowerCamelCase ( self ): '''simple docstring''' with 
self.assertRaises(__UpperCAmelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCamelCase = self.model_fpaa.to(torch.floataa ) __lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCamelCase = self.model_fpaa.half() # Check this does not throw an error __lowerCamelCase = self.model_fpaa.float() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def lowerCamelCase ( cls ): '''simple docstring''' __lowerCamelCase = '''t5-small''' __lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCamelCase = '''Translate in German: Hello, my dog is cute''' def lowerCamelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCamelCase = None # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) __lowerCamelCase = modules def lowerCamelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase = model.generate(**__UpperCAmelCase ) # test with `flan-t5-small` __lowerCamelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCamelCase 
= model.generate(**__UpperCAmelCase ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # model_name __lowerCamelCase = '''bigscience/bloom-560m''' __lowerCamelCase = '''t5-small''' # Different types of model __lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Sequence classification model __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # CausalLM model __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) # Seq2seq model __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' ) def lowerCamelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCamelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCAmelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCamelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''facebook/opt-350m''' super().setUp() def lowerCamelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCamelCase 
= False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability __lowerCamelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCAmelCase ) ): __lowerCamelCase = LoRALayer(module.q_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.k_proj , rank=16 ) __lowerCamelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCamelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCamelCase = model.forward(**__UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = """gpt2-xl""" lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
330
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ : str = logging.get_logger(__name__) def _lowerCamelCase ( lowercase : int , lowercase : Union[str, Any]=False ) -> Optional[Any]: _a = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', 
F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') ) rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _a = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Tuple , lowercase : List[str]=False ) -> List[Any]: for i in range(config.num_hidden_layers ): if base_model: _a = "" else: _a = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _a = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) _a = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _a = in_proj_weight[ : config.hidden_size, : ] _a = in_proj_bias[: config.hidden_size] _a = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _a = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _a = in_proj_weight[ -config.hidden_size :, : ] _a = in_proj_bias[-config.hidden_size :] def _lowerCamelCase ( lowercase : str ) -> List[str]: _a = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : Union[str, Any] ) -> int: _a = dct.pop(lowercase ) _a = val def _lowerCamelCase ( ) -> Dict: _a = "http://images.cocodataset.org/val2017/000000039769.jpg" _a = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im @torch.no_grad() def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : List[str] , lowercase : List[Any]=False ) -> List[str]: _a = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=lowercase , ) _a = ViTHybridConfig(backbone_config=lowercase , image_size=384 , num_labels=1000 ) _a = False # load original model from timm _a = timm.create_model(lowercase , pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _a = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _a = create_rename_keys(lowercase , lowercase ) for src, dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) read_in_q_k_v(lowercase , lowercase , lowercase ) _a = "huggingface/label-files" _a = "imagenet-1k-id2label.json" _a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) _a = {int(lowercase ): v for k, v in idalabel.items()} _a = idalabel _a = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _a = ViTHybridModel(lowercase ).eval() else: _a = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _a = create_transform(**resolve_data_config({} , model=lowercase ) ) _a = transform.transforms _a = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _a = ViTHybridImageProcessor( do_resize=lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _a = prepare_img() _a = transform(lowercase ).unsqueeze(0 ) _a = processor(lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(lowercase , lowercase ) # verify logits with torch.no_grad(): _a = model(lowercase ) _a = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _a = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase , outputs.pooler_output , atol=1E-3 ) else: _a = timm_model(lowercase ) assert timm_logits.shape == 
outputs.logits.shape assert torch.allclose(lowercase , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase ) print(F'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase ) if push_to_hub: print(F'Pushing model and processor to the hub {vit_name}' ) model.push_to_hub(F'ybelkada/{vit_name}' ) processor.push_to_hub(F'ybelkada/{vit_name}' ) if __name__ == "__main__": lowerCAmelCase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_r50_s16_384', type=str, help='Name of the hybrid ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) lowerCAmelCase_ : Union[str, Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
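# Hedged usage sketch for the conversion entry point above. The function name comes from the
# __main__ block; the positional argument order matches the argparse wiring, and the output
# directory is a placeholder assumption.
convert_vit_checkpoint(
    "vit_base_r50_s16_384",        # timm checkpoint name (the argparse default above)
    "./vit-hybrid-base-bit-384",   # assumed local output directory
    False,                         # push_to_hub
)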
63
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = 42 class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ = True @register_to_config def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder __lowerCamelCase = Encoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , ) # pass init params to Decoder __lowerCamelCase = Decoder( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , ) __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) __lowerCamelCase = False __lowerCamelCase = False # only relevant if vae tiling is enabled __lowerCamelCase = self.config.sample_size __lowerCamelCase = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowerCamelCase = 0.25 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if isinstance(__UpperCAmelCase , (Encoder, Decoder) ): __lowerCamelCase = value def lowerCamelCase ( self , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = use_tiling def lowerCamelCase ( self ): '''simple docstring''' self.enable_tiling(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = True def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = {} def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): __lowerCamelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return processors def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = len(self.attn_processors.keys() ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count: raise ValueError( F"""A dict of 
processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the""" F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if hasattr(__UpperCAmelCase , '''set_processor''' ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): module.set_processor(__UpperCAmelCase ) else: module.set_processor(processor.pop(F"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) @apply_forward_hook def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )] __lowerCamelCase = torch.cat(__UpperCAmelCase ) else: __lowerCamelCase = self._decode(__UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase ) for y in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase ) for x in range(__UpperCAmelCase ): __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and 
encode them separately. __lowerCamelCase = [] for i in range(0 , x.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , x.shape[3] , __UpperCAmelCase ): __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowerCamelCase = self.encoder(__UpperCAmelCase ) __lowerCamelCase = self.quant_conv(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ): '''simple docstring''' __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowerCamelCase = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowerCamelCase = [] for i in range(0 , z.shape[2] , __UpperCAmelCase ): __lowerCamelCase = [] for j in range(0 , z.shape[3] , __UpperCAmelCase ): __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase ) __lowerCamelCase = self.decoder(__UpperCAmelCase ) row.append(__UpperCAmelCase ) rows.append(__UpperCAmelCase ) __lowerCamelCase = [] for i, row in enumerate(__UpperCAmelCase ): __lowerCamelCase = [] for j, tile in enumerate(__UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase ) if j > 0: __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) ) __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __lowerCamelCase = sample __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist if sample_posterior: __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase ) else: __lowerCamelCase = posterior.mode() __lowerCamelCase = self.decode(__UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__UpperCAmelCase )
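# Hedged usage sketch for the tiled VAE above. The code mirrors diffusers' AutoencoderKL, so the
# import, the method names `enable_tiling`/`encode`/`decode` and the input shape below are
# assumptions based on that upstream API rather than names taken from this file.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL()                 # default config; tile size falls back to the configured sample_size
vae.enable_tiling()                   # route encode/decode through the overlapping-tile code paths
image = torch.randn(1, 3, 256, 256)   # larger than the tile size, so tiling kicks in
with torch.no_grad():
    posterior = vae.encode(image).latent_dist   # AutoencoderKLOutput.latent_dist, as returned above
    latents = posterior.sample()
    reconstruction = vae.decode(latents).sample  # DecoderOutput.sample, as returned above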
330
0
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase( __a ): '''simple docstring''' @staticmethod @abstractmethod def UpperCamelCase_ ( a_: ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' raise NotImplementedError()
64
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
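# Hedged usage sketch for the conversion entry point above; the function name and the
# `config_update` keyword are taken from the __main__ block, while both paths are placeholders.
convert_bigbird_pegasus_ckpt_to_pytorch(
    "./bigbird-pegasus-tf/model.ckpt",   # assumed TF checkpoint path (passed to tf.train.list_variables)
    "./bigbird-pegasus-converted",       # assumed output directory for save_pretrained
    config_update={},
)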
330
0
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A : def __init__(self : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=1_3 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=9_9 , __UpperCAmelCase : Tuple=3_2 , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : str=3_7 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Optional[Any]=5_1_2 , __UpperCAmelCase : Tuple=1_6 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : str=4 , __UpperCAmelCase : str=None , ) -> Any: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = 1_3 UpperCAmelCase__ = 7 UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = 9_9 UpperCAmelCase__ = 3_8_4 UpperCAmelCase__ = 2 UpperCAmelCase__ = 4 UpperCAmelCase__ = 3_7 UpperCAmelCase__ = "gelu" UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 5_1_2 UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 2 UpperCAmelCase__ = 0.02 UpperCAmelCase__ = 3 UpperCAmelCase__ = 4 UpperCAmelCase__ = 1_2_8 UpperCAmelCase__ = 2 UpperCAmelCase__ = 9 UpperCAmelCase__ = 1 UpperCAmelCase__ = None def lowercase_ (self : Any ) -> str: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = None if self.use_input_mask: UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ = None if self.use_token_type_ids: UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ (self : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : 
int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFConvBertModel(config=__UpperCAmelCase ) UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCAmelCase__ = [input_ids, input_mask] UpperCAmelCase__ = model(__UpperCAmelCase ) UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFConvBertForMaskedLM(config=__UpperCAmelCase ) UpperCAmelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ (self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__UpperCAmelCase ) UpperCAmelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] ) -> str: """simple docstring""" UpperCAmelCase__ = self.num_choices UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__UpperCAmelCase ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase_ (self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : int ) -> int: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = TFConvBertForTokenClassification(config=__UpperCAmelCase ) UpperCAmelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ (self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , 
__UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__UpperCAmelCase ) UpperCAmelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ (self : Tuple ) -> Dict: """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = config_and_inputs UpperCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __UpperCAmelCase : Tuple = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __UpperCAmelCase : Optional[Any] = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : str = False def lowercase_ (self : Tuple ) -> str: """simple docstring""" UpperCAmelCase__ = TFConvBertModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 ) def lowercase_ (self : str ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def lowercase_ (self : Optional[int] ) -> Dict: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowercase_ (self : str ) -> Tuple: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowercase_ (self : int ) -> Dict: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowercase_ (self : Tuple ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowercase_ (self : Optional[Any] ) -> int: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowercase_ (self : Optional[int] ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowercase_ (self : Union[str, Any] ) -> Any: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = True UpperCAmelCase__ = True if hasattr(__UpperCAmelCase , "use_cache" ): UpperCAmelCase__ = 
True UpperCAmelCase__ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , "key_length" , __UpperCAmelCase ) for model_class in self.all_model_classes: UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase__ = model_class(__UpperCAmelCase ) UpperCAmelCase__ = len(model(__UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase , saved_model=__UpperCAmelCase ) UpperCAmelCase__ = os.path.join(__UpperCAmelCase , "saved_model" , "1" ) UpperCAmelCase__ = tf.keras.models.load_model(__UpperCAmelCase ) UpperCAmelCase__ = model(__UpperCAmelCase ) if self.is_encoder_decoder: UpperCAmelCase__ = outputs["encoder_hidden_states"] UpperCAmelCase__ = outputs["encoder_attentions"] else: UpperCAmelCase__ = outputs["hidden_states"] UpperCAmelCase__ = outputs["attentions"] self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) UpperCAmelCase__ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def lowercase_ (self : Any ) -> Any: """simple docstring""" UpperCAmelCase__ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(__UpperCAmelCase ) def lowercase_ (self : int ) -> List[str]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = True UpperCAmelCase__ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , "key_length" , __UpperCAmelCase ) UpperCAmelCase__ = getattr(self.model_tester , "key_length" , __UpperCAmelCase ) def check_decoder_attentions_output(__UpperCAmelCase : List[str] ): UpperCAmelCase__ = len(__UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) UpperCAmelCase__ = outputs.decoder_attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__UpperCAmelCase : List[str] ): UpperCAmelCase__ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = model_class(__UpperCAmelCase ) UpperCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) UpperCAmelCase__ = len(__UpperCAmelCase ) self.assertEqual(config.output_hidden_states , __UpperCAmelCase ) check_encoder_attentions_output(__UpperCAmelCase ) if self.is_encoder_decoder: UpperCAmelCase__ = 
model_class(__UpperCAmelCase ) UpperCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , __UpperCAmelCase ) check_decoder_attentions_output(__UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__UpperCAmelCase ) UpperCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , __UpperCAmelCase ) check_encoder_attentions_output(__UpperCAmelCase ) # Check attention is always last and order is fine UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__UpperCAmelCase ) UpperCAmelCase__ = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , __UpperCAmelCase ) check_encoder_attentions_output(__UpperCAmelCase ) @require_tf class A ( unittest.TestCase ): @slow def lowercase_ (self : Dict ) -> Any: """simple docstring""" UpperCAmelCase__ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase__ = model(__UpperCAmelCase )[0] UpperCAmelCase__ = [1, 6, 7_6_8] self.assertEqual(output.shape , __UpperCAmelCase ) UpperCAmelCase__ = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, -0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
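# A short, hedged inference sketch mirroring the integration test above; the checkpoint name and
# the expected (1, 6, 768) output shape are both taken from that test.
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # TensorShape([1, 6, 768])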
65
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
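# Hedged usage sketch for the conversational pipeline above. The `add_user_input` method name and
# the DialoGPT checkpoint are assumptions based on the upstream transformers API, not this file.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Can you recommend a movie for tonight?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

conversation.add_user_input("Something funny, please.")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])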
330
0
"""simple docstring""" from math import isqrt, loga def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Tuple = [True] * max_number for i in range(2, isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2, _lowercase, _lowercase ): snake_case_ :Dict = False return [i for i in range(2, _lowercase ) if is_prime[i]] def A_ ( _lowercase = 800800, _lowercase = 800800 ): '''simple docstring''' snake_case_ :Union[str, Any] = degree * loga(_lowercase ) snake_case_ :Tuple = int(_lowercase ) snake_case_ :List[str] = calculate_prime_numbers(_lowercase ) snake_case_ :Union[str, Any] = 0 snake_case_ :List[str] = 0 snake_case_ :Optional[Any] = len(_lowercase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
66
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a_ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def a__ ( _UpperCamelCase : int ): for pegasus_name, hf_name in PATTERNS: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = DEFAULTS.copy() cfg_kwargs.update(_UpperCamelCase ) __lowerCamelCase = PegasusConfig(**_UpperCamelCase ) __lowerCamelCase = PegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.model.state_dict() __lowerCamelCase = {} for k, v in tf_weights.items(): __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: __lowerCamelCase = v.T __lowerCamelCase = torch.tensor(_UpperCamelCase ,dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected __lowerCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = mapping['''shared.weight'''] __lowerCamelCase = {k: torch.zeros_like(_UpperCamelCase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = torch_model.model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : str="./ckpt/aeslc/model.ckpt-32000" ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # save tokenizer first __lowerCamelCase = Path(_UpperCamelCase ).parent.name __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] __lowerCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' 
,model_max_length=_UpperCamelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(_UpperCamelCase ) # convert model __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": __lowerCamelCase = task_specific_params __lowerCamelCase = convert_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(_UpperCamelCase ,Path(_UpperCamelCase ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() if args.save_dir is None: a_ = Path(args.tf_ckpt_path).parent.name a_ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
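# Hedged usage sketch for the Pegasus conversion entry point above. Note that the dataset name
# (and hence the task-specific config) is derived from the checkpoint's parent directory; the
# checkpoint path is the default hinted at in get_tf_weights_as_numpy and the save directory is a
# placeholder assumption.
convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "./pegasus/aeslc")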
330
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class a__ ( unittest.TestCase ): lowerCamelCase : Optional[int] =MODEL_FOR_CAUSAL_LM_MAPPING lowerCamelCase : int =TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output __lowerCamelCase = text_generator('''This is a test''' , do_sample=a ) self.assertEqual( a , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) __lowerCamelCase = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( a , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) __lowerCamelCase = text_generator('''This is a test''' , do_sample=a , num_return_sequences=2 , return_tensors=a ) self.assertEqual( a , [ {'''generated_token_ids''': ANY(a )}, {'''generated_token_ids''': ANY(a )}, ] , ) __lowerCamelCase = text_generator.model.config.eos_token_id __lowerCamelCase = '''<pad>''' __lowerCamelCase = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , ) self.assertEqual( a , [ [ {'''generated_token_ids''': ANY(a )}, {'''generated_token_ids''': ANY(a )}, ], [ {'''generated_token_ids''': ANY(a )}, {'''generated_token_ids''': ANY(a )}, ], ] , ) @require_tf def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output __lowerCamelCase = text_generator('''This is a test''' , do_sample=a ) self.assertEqual( a , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) __lowerCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=a ) self.assertEqual( a , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Dict , a : int , a : Optional[Any] ): """simple docstring""" __lowerCamelCase = TextGenerationPipeline(model=a , tokenizer=a ) return text_generator, ["This is a test", "Another test"] def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = '''Hello I believe in''' __lowerCamelCase = pipeline('''text-generation''' , 
model='''hf-internal-testing/tiny-random-gpt2''' ) __lowerCamelCase = text_generator(a ) self.assertEqual( a , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) __lowerCamelCase = text_generator(a , stop_sequence=''' fe''' ) self.assertEqual(a , [{'''generated_text''': '''Hello I believe in fe'''}] ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : List[str] , a : Optional[int] ): """simple docstring""" __lowerCamelCase = text_generator.model __lowerCamelCase = text_generator.tokenizer __lowerCamelCase = text_generator('''This is a test''' ) self.assertEqual(a , [{'''generated_text''': ANY(a )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) __lowerCamelCase = text_generator('''This is a test''' , return_full_text=a ) self.assertEqual(a , [{'''generated_text''': ANY(a )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) __lowerCamelCase = pipeline(task='''text-generation''' , model=a , tokenizer=a , return_full_text=a ) __lowerCamelCase = text_generator('''This is a test''' ) self.assertEqual(a , [{'''generated_text''': ANY(a )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) __lowerCamelCase = text_generator('''This is a test''' , return_full_text=a ) self.assertEqual(a , [{'''generated_text''': ANY(a )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) __lowerCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=a ) self.assertEqual( a , [ [{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}], [{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}], ] , ) if text_generator.tokenizer.pad_token is not None: __lowerCamelCase = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=a ) self.assertEqual( a , [ [{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}], [{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}], ] , ) with self.assertRaises(a ): __lowerCamelCase = text_generator('''test''' , return_full_text=a , return_text=a ) with self.assertRaises(a ): __lowerCamelCase = text_generator('''test''' , return_full_text=a , return_tensors=a ) with self.assertRaises(a ): __lowerCamelCase = text_generator('''test''' , return_text=a , return_tensors=a ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): __lowerCamelCase = text_generator('''''' ) self.assertEqual(a , [{'''generated_text''': ANY(a )}] ) else: with self.assertRaises((ValueError, AssertionError) ): __lowerCamelCase = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
__lowerCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 5_00 , max_new_tokens=20 ) __lowerCamelCase = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(a ): text_generator( '''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" import torch # Classic `model_kwargs` __lowerCamelCase = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) __lowerCamelCase = pipe('''This is a test''' ) self.assertEqual( a , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) __lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) __lowerCamelCase = pipe('''This is a test''' ) self.assertEqual( a , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 __lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) __lowerCamelCase = pipe('''This is a test''' ) self.assertEqual( a , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" import torch __lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" import torch __lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=a , top_p=0.5 ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" __lowerCamelCase = '''Hello world''' __lowerCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": __lowerCamelCase = logging.get_logger('''transformers.generation.tf_utils''' ) else: __lowerCamelCase = logging.get_logger('''transformers.generation.utils''' 
) __lowerCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(a ) as cl: __lowerCamelCase = text_generator(a , max_length=10 , max_new_tokens=1 ) self.assertIn(a , cl.out ) # The user only sets one -> no warning with CaptureLogger(a ) as cl: __lowerCamelCase = text_generator(a , max_new_tokens=1 ) self.assertNotIn(a , cl.out ) with CaptureLogger(a ) as cl: __lowerCamelCase = text_generator(a , max_length=10 ) self.assertNotIn(a , cl.out )
67
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } a_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ): for attribute in key.split('''.''' ): __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ) if weight_type is not None: __lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ): __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "bias" in name: __lowerCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = '''weight''' else: __lowerCamelCase = None set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ): __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from 
{full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ): if config_path is not None: __lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatConfig() __lowerCamelCase = '''''' if is_finetuned: __lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase ) else: __lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __lowerCamelCase = model[0].eval() recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class a__ ( snake_case , snake_case ): """simple docstring""" __lowerCamelCase = 'swin' __lowerCamelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , lowercase=224 , lowercase=4 , lowercase=3 , lowercase=96 , lowercase=[2, 2, 6, 2] , lowercase=[3, 6, 12, 24] , lowercase=7 , lowercase=4.0 , lowercase=True , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase="gelu" , lowercase=False , lowercase=0.02 , lowercase=1e-5 , lowercase=32 , lowercase=None , lowercase=None , **lowercase , ) -> Union[str, Any]: '''simple docstring''' super().__init__(**lowercase ) A__ = image_size A__ = patch_size A__ = num_channels A__ = embed_dim A__ = depths A__ = len(lowercase ) A__ = num_heads A__ = window_size A__ = mlp_ratio A__ = qkv_bias A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = drop_path_rate A__ = hidden_act A__ = use_absolute_embeddings A__ = layer_norm_eps A__ = initializer_range A__ = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A__ = int(embed_dim * 2 ** (len(lowercase ) - 1) ) A__ = ["stem"] + [F'stage{idx}' for idx in range(1 , len(lowercase ) + 1 )] A__ , A__ = get_aligned_output_features_output_indices( out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names ) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = version.parse('1.11' ) @property def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase ( self ) -> float: '''simple docstring''' return 1e-4
68
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING a_ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , '''vision''' ) self.check_model_type(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return {}, {}, {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = load_image(__UpperCAmelCase ) __lowerCamelCase = image.size __lowerCamelCase = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.model(**__UpperCAmelCase ) return model_outputs def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = model_outputs.predicted_depth __lowerCamelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__UpperCAmelCase ) __lowerCamelCase = prediction.squeeze().cpu().numpy() __lowerCamelCase = (output * 255 / np.max(__UpperCAmelCase )).astype('''uint8''' ) __lowerCamelCase = Image.fromarray(__UpperCAmelCase ) __lowerCamelCase = {} __lowerCamelCase = predicted_depth __lowerCamelCase = depth return output_dict
330
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> Optional[int]: snake_case_ = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4') snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) sd_pipe.set_scheduler('sample_euler') snake_case_ = 'A painting of a squirrel eating a burger' snake_case_ = torch.manual_seed(0) snake_case_ = sd_pipe([prompt], generator=lowerCAmelCase__, guidance_scale=9.0, num_inference_steps=20, output_type='np') snake_case_ = output.images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self) -> Any: snake_case_ = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base') snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) sd_pipe.set_scheduler('sample_euler') snake_case_ = 'A painting of a squirrel eating a burger' snake_case_ = torch.manual_seed(0) snake_case_ = sd_pipe([prompt], generator=lowerCAmelCase__, guidance_scale=9.0, num_inference_steps=20, output_type='np') snake_case_ = output.images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1 def a_ ( self) -> Any: snake_case_ = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base') snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) sd_pipe.set_scheduler('sample_dpmpp_2m') snake_case_ = 'A painting of a squirrel eating a burger' snake_case_ = torch.manual_seed(0) snake_case_ = sd_pipe( [prompt], generator=lowerCAmelCase__, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=lowerCAmelCase__, ) snake_case_ = output.images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
69
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
0
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch information about the authenticated GitHub user."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
70
from __future__ import annotations from typing import Generic, TypeVar a_ = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = data __lowerCamelCase = self __lowerCamelCase = 0 class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # map from node name to the node object __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # create a new set with x as its member __lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # find the set x belongs to (with path-compression) __lowerCamelCase = self.map[data] if elem_ref != elem_ref.parent: __lowerCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __lowerCamelCase = nodea else: __lowerCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) ) class __lowerCAmelCase ( Generic[T] ): def __init__( self ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' # add a node ONLY if its not present in the graph if node not in self.connections: __lowerCamelCase = {} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' # add an edge with the given weight self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) __lowerCamelCase = weight __lowerCamelCase = weight def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __UpperCAmelCase : x[2] ) # creating the disjoint set __lowerCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__UpperCAmelCase ) # MST generation __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index] index += 1 __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) __lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase ) return graph
330
0
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer value found for the given env keys, else the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
71
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.get_config() __lowerCamelCase = 300 return config def lowerCamelCase ( self ): '''simple docstring''' ( ( __lowerCamelCase ) ,( 
__lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = True __lowerCamelCase = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_choices __lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) ,( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) 
def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''MRA does not output attentions''' ) def lowerCamelCase ( self ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCamelCase = model(__UpperCAmelCase )[0] __lowerCamelCase = 50265 __lowerCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __lowerCamelCase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
330
0
"""simple docstring""" def snake_case_ ( A_ : str, A_ : str ): '''simple docstring''' if not (isinstance(A_, A_ ) and isinstance(A_, A_ )): raise ValueError('''longest_common_substring() takes two strings for inputs''' ) _lowerCamelCase : List[Any] = len(A_ ) _lowerCamelCase : Optional[Any] = len(A_ ) _lowerCamelCase : Tuple = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Tuple = 0 for i in range(1, texta_length + 1 ): for j in range(1, texta_length + 1 ): if texta[i - 1] == texta[j - 1]: _lowerCamelCase : Any = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: _lowerCamelCase : Optional[int] = i _lowerCamelCase : Optional[Any] = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
72
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests a =open # noqa: we just need to have a builtin inside this module to test it properly
73
from string import ascii_lowercase, ascii_uppercase def a__ ( _UpperCamelCase : str ): if not sentence: return "" __lowerCamelCase = dict(zip(_UpperCamelCase ,_UpperCamelCase ) ) return lower_to_upper.get(sentence[0] ,sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
330
0
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: str = (PNDMScheduler,) _lowerCamelCase: Tuple = (('''num_inference_steps''', 50),) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,**A_ : List[Any] ) -> Union[str, Any]: A = { 'num_train_timesteps': 1000, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**A_ ) return config def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=0 ,**A_ : Optional[int] ) -> Optional[Any]: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' ,A_ ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config(**A_ ) A = scheduler_class(**A_ ) scheduler.set_timesteps(A_ ) # copy over dummy past residuals A = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A_ ) A = scheduler_class.from_pretrained(A_ ) new_scheduler.set_timesteps(A_ ) # copy over dummy past residuals A = dummy_past_residuals[:] A = scheduler.step_prk(A_ ,A_ ,A_ ,**A_ ).prev_sample A = new_scheduler.step_prk(A_ ,A_ ,A_ ,**A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A = scheduler.step_plms(A_ ,A_ ,A_ ,**A_ ).prev_sample A = new_scheduler.step_plms(A_ ,A_ ,A_ ,**A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[Any]=0 ,**A_ : List[str] ) -> Tuple: A = dict(self.forward_default_kwargs ) A = kwargs.pop('num_inference_steps' ,A_ ) A = self.dummy_sample A = 0.1 * sample A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config() A = scheduler_class(**A_ ) scheduler.set_timesteps(A_ ) # copy over dummy past residuals (must be after setting timesteps) A = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A_ ) A = scheduler_class.from_pretrained(A_ ) # copy over dummy past residuals new_scheduler.set_timesteps(A_ ) # copy over dummy past residual (must be after setting timesteps) A = dummy_past_residuals[:] A = scheduler.step_prk(A_ ,A_ ,A_ ,**A_ ).prev_sample A = new_scheduler.step_prk(A_ ,A_ ,A_ ,**A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A = scheduler.step_plms(A_ ,A_ ,A_ ,**A_ ).prev_sample A = new_scheduler.step_plms(A_ ,A_ ,A_ ,**A_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,**A_ : List[Any] ) -> Any: A = self.scheduler_classes[0] A = self.get_scheduler_config(**A_ ) A = scheduler_class(**A_ ) A = 10 A = self.dummy_model() A = self.dummy_sample_deter scheduler.set_timesteps(A_ ) for i, t in enumerate(scheduler.prk_timesteps ): A = model(A_ ,A_ ) A = scheduler.step_prk(A_ ,A_ ,A_ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A = model(A_ ,A_ ) A = scheduler.step_plms(A_ ,A_ ,A_ ).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = dict(self.forward_default_kwargs ) A = 
kwargs.pop('num_inference_steps' ,A_ ) for scheduler_class in self.scheduler_classes: A = self.get_scheduler_config() A = scheduler_class(**A_ ) A = self.dummy_sample A = 0.1 * sample if num_inference_steps is not None and hasattr(A_ ,'set_timesteps' ): scheduler.set_timesteps(A_ ) elif num_inference_steps is not None and not hasattr(A_ ,'set_timesteps' ): A = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A = dummy_past_residuals[:] A = scheduler.step_prk(A_ ,0 ,A_ ,**A_ ).prev_sample A = scheduler.step_prk(A_ ,1 ,A_ ,**A_ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) A = scheduler.step_plms(A_ ,0 ,A_ ,**A_ ).prev_sample A = scheduler.step_plms(A_ ,1 ,A_ ,**A_ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def _SCREAMING_SNAKE_CASE ( self : str ) -> str: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=A_ ) A = self.scheduler_classes[0] A = self.get_scheduler_config(steps_offset=1 ) A = scheduler_class(**A_ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps ,torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: for beta_start, beta_end in zip([0.00_01, 0.0_01] ,[0.0_02, 0.02] ): self.check_over_configs(beta_start=A_ ,beta_end=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: for t in [1, 5, 10]: self.check_over_forward(time_step=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ): self.check_over_forward(num_inference_steps=A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A = 27 for scheduler_class in self.scheduler_classes: A = self.dummy_sample A = 0.1 * sample A = self.get_scheduler_config() A = scheduler_class(**A_ ) scheduler.set_timesteps(A_ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A = scheduler.step_prk(A_ ,A_ ,A_ ).prev_sample def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: with self.assertRaises(A_ ): A = self.scheduler_classes[0] A = self.get_scheduler_config() A = scheduler_class(**A_ ) scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: A = self.full_loop() A = torch.sum(torch.abs(A_ ) ) A = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1e-2 assert abs(result_mean.item() - 0.25_80 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = self.full_loop(prediction_type='v_prediction' ) A = torch.sum(torch.abs(A_ ) ) A = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1e-2 assert 
abs(result_mean.item() - 0.08_78 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: # We specify different beta, so that the first alpha is 0.99 A = self.full_loop(set_alpha_to_one=A_ ,beta_start=0.01 ) A = torch.sum(torch.abs(A_ ) ) A = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1e-2 assert abs(result_mean.item() - 0.29_95 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: # We specify different beta, so that the first alpha is 0.99 A = self.full_loop(set_alpha_to_one=A_ ,beta_start=0.01 ) A = torch.sum(torch.abs(A_ ) ) A = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1e-2 assert abs(result_mean.item() - 0.24_34 ) < 1e-3
74
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCAmelCase ( lowerCAmelCase__ ): @slow @require_torch def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 128 __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(__UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 ) __lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __lowerCamelCase = outputs.attention_mask assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__UpperCAmelCase ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , 
eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # start training trainer.train()
330
0
'''simple docstring'''

a_ : Any = """Input must be a string of 8 numbers plus letter"""
a_ : Union[str, Any] = """TRWAGMYFPDXBNJZSQVHLCKE"""


def a_ ( __snake_case : str ) -> bool:
    """simple docstring"""
    if not isinstance(__snake_case , __snake_case ):
        lowerCamelCase_ =F'''Expected string as input, found {type(__snake_case ).__name__}'''
        raise TypeError(__snake_case )
    lowerCamelCase_ =spanish_id.replace('''-''' , '''''' ).upper()
    if len(__snake_case ) != 9:
        raise ValueError(__snake_case )
    try:
        lowerCamelCase_ =int(spanish_id_clean[0:8] )
        lowerCamelCase_ =spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(__snake_case ) from ex
    if letter.isdigit():
        raise ValueError(__snake_case )
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ["""TimmBackbone"""]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
0
from collections.abc import Callable

import numpy as np


def lowerCamelCase__ ( _a , _a , _a , _a , _a):
    SCREAMING_SNAKE_CASE : Dict = int(np.ceil((x_end - xa) / step_size))
    SCREAMING_SNAKE_CASE : Tuple = np.zeros((n + 1,))
    SCREAMING_SNAKE_CASE : int = ya
    SCREAMING_SNAKE_CASE : int = xa

    for k in range(_a):
        SCREAMING_SNAKE_CASE : Any = y[k] + step_size * ode_func(_a , y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase=0.01 , __UpperCAmelCase=1000 ): '''simple docstring''' __lowerCamelCase = p_stop __lowerCamelCase = max_length def __iter__( self ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = False while not stop and count < self.max_length: yield count count += 1 __lowerCamelCase = random.random() < self.p_stop class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = [ BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 ) ] __lowerCamelCase = [list(__UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__UpperCAmelCase ) for shard in batch_sampler_shards] , [len(__UpperCAmelCase ) for e in expected] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) # Check the shards when the dataset is very small. __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of total batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' # Check the shards when the dataset is a round multiple of batch size. __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) # Check the shards when the dataset is very small. 
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) __lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = [[], []] self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] __lowerCamelCase = [BatchSamplerShard(__UpperCAmelCase , 2 , __UpperCAmelCase , even_batches=__UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=False ): '''simple docstring''' random.seed(__UpperCAmelCase ) __lowerCamelCase = list(__UpperCAmelCase ) __lowerCamelCase = [ IterableDatasetShard( __UpperCAmelCase , batch_size=__UpperCAmelCase , drop_last=__UpperCAmelCase , num_processes=__UpperCAmelCase , process_index=__UpperCAmelCase , split_batches=__UpperCAmelCase , ) for i in range(__UpperCAmelCase ) ] __lowerCamelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__UpperCAmelCase ) iterable_dataset_lists.append(list(__UpperCAmelCase ) ) __lowerCamelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCamelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) self.assertTrue(len(__UpperCAmelCase ) % shard_batch_size == 0 ) __lowerCamelCase = [] for idx in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__UpperCAmelCase ) < len(__UpperCAmelCase ): reference += reference self.assertListEqual(__UpperCAmelCase , reference[: len(__UpperCAmelCase )] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = 42 __lowerCamelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) # Edge case with a very small dataset __lowerCamelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) 
self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) self.check_iterable_dataset_shards(__UpperCAmelCase , __UpperCAmelCase , batch_size=4 , drop_last=__UpperCAmelCase , split_batches=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCAmelCase ) __lowerCamelCase = SkipBatchSampler(__UpperCAmelCase , 2 ) self.assertListEqual(list(__UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) __lowerCamelCase = skip_first_batches(__UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): '''simple docstring''' Accelerator() __lowerCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
330
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCamelCase : Optional[int] = logging.get_logger(__name__) _UpperCamelCase : Dict = "▁" _UpperCamelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"} _UpperCamelCase : Dict = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } _UpperCamelCase : Tuple = { "facebook/xglm-564M": 20_48, } class UpperCAmelCase_ ( _a): lowerCamelCase__ : Optional[int] = VOCAB_FILES_NAMES lowerCamelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : List[str] = ["input_ids", "attention_mask"] def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a = None , **a , ) -> None: lowercase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase__ : int = 7 lowercase__ : int = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowercase__ : Optional[int] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , ) lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(a ) ) lowercase__ : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : List[str] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} lowercase__ : Tuple = len(self.sp_model ) lowercase__ : List[Any] = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(a ) lowercase__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[str]: lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : Tuple = None lowercase__ : Dict = self.sp_model.serialized_model_proto() return state def __setstate__( self , a ) -> int: lowercase__ : Optional[Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowercase__ : Tuple = {} lowercase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _UpperCAmelCase ( self , a , a = None ) -> List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase__ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _UpperCAmelCase ( self , a , a = None , a = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a ) if token_ids_a is None: return [1] + ([0] * len(a )) return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) def _UpperCAmelCase ( self , a , a = None ) -> List[int]: lowercase__ : Union[str, Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def _UpperCAmelCase ( self ) -> Optional[int]: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[Any] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _UpperCAmelCase ( self , a ) -> List[str]: return self.sp_model.encode(a , out_type=a ) def _UpperCAmelCase ( self , a ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : Any = self.sp_model.PieceToId(a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _UpperCAmelCase ( self , a ) -> List[Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _UpperCAmelCase ( self , a ) -> Dict: lowercase__ : List[Any] = ''.join(a ).replace(a , ' ' ).strip() return out_string def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]: if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Any = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a ) elif not os.path.isfile(self.vocab_file ): with open(a , 'wb' ) as fi: lowercase__ : Any = self.sp_model.serialized_model_proto() fi.write(a ) return (out_vocab_file,)
77
def a__ ( _UpperCamelCase : int ):
    __lowerCamelCase = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
330
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case_ = { """configuration_squeezebert""": [ """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SqueezeBertConfig""", """SqueezeBertOnnxConfig""", ], """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""SqueezeBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """SqueezeBertForMaskedLM""", """SqueezeBertForMultipleChoice""", """SqueezeBertForQuestionAnswering""", """SqueezeBertForSequenceClassification""", """SqueezeBertForTokenClassification""", """SqueezeBertModel""", """SqueezeBertModule""", """SqueezeBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ = 16 a_ = 32 def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ): __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ) def tokenize_function(_UpperCamelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( _UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(_UpperCamelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( _UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ = mocked_dataloaders # noqa: F811 def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1": __lowerCamelCase = 2 # Initialize accelerator __lowerCamelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_UpperCamelCase ) def inner_training_loop(_UpperCamelCase : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.loss accelerator.backward(_UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**_UpperCamelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_UpperCamelCase ,references=_UpperCamelCase ,) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a__ ( ): __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCamelCase ,_UpperCamelCase ) if __name__ == "__main__": main()
330
0
'''simple docstring'''

from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

lowerCamelCase_ = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

lowerCamelCase_ = {
    '''facebook/blenderbot_small-90M''': 5_12,
}


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = VOCAB_FILES_NAMES
    snake_case = PRETRAINED_VOCAB_FILES_MAP
    snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case = BlenderbotSmallTokenizer

    def __init__( self : Optional[int] , __UpperCAmelCase : str=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Any="<|endoftext|>" , __UpperCAmelCase : Optional[int]="<|endoftext|>" , __UpperCAmelCase : Any="<|endoftext|>" , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Dict=True , **__UpperCAmelCase : str , ):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=__UpperCAmelCase , merges=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , ) ,
            bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , **__UpperCAmelCase , )
        _A = add_prefix_space

    def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int=None ):
        '''simple docstring'''
        _A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
        '''simple docstring'''
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
79
import logging import os import threading import time try: import warnings except ImportError: a_ = None try: import msvcrt except ImportError: a_ = None try: import fcntl except ImportError: a_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: a_ = OSError # Data # ------------------------------------------------ a_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] a_ = """3.0.12""" a_ = None def a__ ( ): global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock_file return None def __str__( self ): '''simple docstring''' __lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.lock.release() return None class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file @property def lowerCamelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = float(__UpperCAmelCase ) return None def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() def lowerCamelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def lowerCamelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ): '''simple docstring''' # Use the default timeout, if no timeout is provided. if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCamelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__UpperCAmelCase ) return None def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(__UpperCAmelCase ) __lowerCamelCase = str(hash(__UpperCAmelCase ) ) __lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ): '''simple docstring''' __lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase__ ): def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: __lowerCamelCase = fd return None def lowerCamelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None a_ = None if msvcrt: a_ = WindowsFileLock elif fcntl: a_ = UnixFileLock else: a_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
330
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowercase_ ( unittest.TestCase ): def __a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __a ( self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return model @property def __a ( self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , ) return model @property def __a ( self ): torch.manual_seed(0 ) UpperCamelCase__ = AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , ) UpperCamelCase__ = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return vqvae, unet @slow def __a ( self ): UpperCamelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) UpperCamelCase__ = DDPMScheduler() UpperCamelCase__ = AudioDiffusionPipeline(vqvae=a , unet=self.dummy_unet , mel=a , scheduler=a ) UpperCamelCase__ = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 ) UpperCamelCase__ = pipe(generator=a , steps=4 ) UpperCamelCase__ = output.audios[0] UpperCamelCase__ = output.images[0] UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 ) UpperCamelCase__ = pipe(generator=a , steps=4 , return_dict=a ) UpperCamelCase__ = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] UpperCamelCase__ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10] UpperCamelCase__ = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 UpperCamelCase__ = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) UpperCamelCase__ = DDIMScheduler() UpperCamelCase__ = self.dummy_vqvae_and_unet UpperCamelCase__ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a , scheduler=a ) UpperCamelCase__ = 
pipe.to(a ) pipe.set_progress_bar_config(disable=a ) np.random.seed(0 ) UpperCamelCase__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 ) UpperCamelCase__ = pipe(raw_audio=a , generator=a , start_step=5 , steps=10 ) UpperCamelCase__ = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] UpperCamelCase__ = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 UpperCamelCase__ = self.dummy_unet_condition UpperCamelCase__ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=a , mel=a , scheduler=a ) UpperCamelCase__ = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) np.random.seed(0 ) UpperCamelCase__ = torch.rand((1, 1, 10) ) UpperCamelCase__ = pipe(generator=a , encoding=a ) UpperCamelCase__ = output.images[0] UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] UpperCamelCase__ = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): def __a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ): UpperCamelCase__ = torch_device UpperCamelCase__ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" ) UpperCamelCase__ = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 ) UpperCamelCase__ = pipe(generator=a ) UpperCamelCase__ = output.audios[0] UpperCamelCase__ = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] UpperCamelCase__ = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
80
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_frames __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = attention_type __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __lowerCamelCase = (image_size // patch_size) ** 2 __lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1 def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __lowerCamelCase = self.num_labels return config def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = TimesformerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __lowerCamelCase = model(__UpperCAmelCase ) 
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
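

# Minimal standalone sketch (illustrative only, not part of the test suite and never called here):
# it mirrors what TimesformerModelIntegrationTest above does, but outside of unittest. The checkpoint
# name, the 0.5 mean/std and the 8-frame clip are taken from that test; the helper's name and the
# label printout are assumptions added for illustration.
def run_timesformer_inference_example():
    video = prepare_video()  # list of numpy frames from the spaghetti-video sample
    processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")

    inputs = processor(video[:8], return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits

    predicted_class = model.config.id2label[logits.argmax(-1).item()]
    print("Predicted class:", predicted_class)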