code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---|
stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
def solution():
    """
    Returns the last ten digits of the series 1^1 + 2^2 + 3^3 + ... + 1000^1000
    (Project Euler problem 48).
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
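# Sanity check, if one is wanted: the published answer to Project Euler #48
# is "9110846700", so a quick self-test could read:
#
#     assert solution() == "9110846700"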
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
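# The dummy-object pattern in a minimal sketch (DPMSolverSDEScheduler is an
# assumed name for this placeholder class): importing the class always
# succeeds, and only *using* it fails, with requires_backends naming the
# missing "torch"/"torchsde" backends:
#
#     scheduler_cls = DPMSolverSDEScheduler  # import-time: fine
#     scheduler_cls()                        # call-time: raises a backend error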
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature: stores images as {"bytes", "path"} structs and decodes them to `PIL.Image.Image` objects."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image) -> bytes:
    """Convert a PIL Image object to bytes using its native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image) -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC 4648."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """Decodes `encoded_data` according to RFC 4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
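# A minimal round-trip check against the standard library; base64_encode and
# base64_decode are the names used for the two functions in this file:
#
#     import base64 as stdlib_base64
#
#     payload = b"Hello, World!"
#     assert base64_encode(payload) == stdlib_base64.b64encode(payload)
#     assert base64_decode(base64_encode(payload)) == payload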
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
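# A minimal sketch of observing the deprecation (hypothetical usage, not part
# of the module): instantiation still works but emits a FutureWarning.
#
#     import warnings
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         CLIPFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)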
import operator as op


SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
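# A minimal sketch of how STR_OPERATION_TO_FUNC is meant to be used: it turns
# a comparison spelled as a string into the matching operator function, which
# lets version gates be written generically.
#
#     compare = STR_OPERATION_TO_FUNC[">="]
#     assert compare(2, 1) and not compare(1, 2)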
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
import unittest

import numpy as np


def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """
    Computes the Schur complement of the block matrix [[A, B], [B^T, C]]
    with respect to the (invertible) block A: S = C - B^T A^(-1) B.
    An explicit (pseudo-)inverse of A can be supplied via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # For M = [[A, B], [B^T, C]] with A invertible, det(M) = det(A) * det(S),
        # where S is the Schur complement of A in M.
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_from_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=18 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,) -> List[Any]:
UpperCAmelCase_ : int = size if size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Dict = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Any = do_resize
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Any = apply_ocr
def a__ ( self ) -> Dict:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self ) -> str:
UpperCAmelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> int:
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''apply_ocr''' ) )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 18} )
UpperCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
def a__ ( self ) -> Any:
pass
def a__ ( self ) -> Tuple:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
self.assertIsInstance(encoding.words ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes ,_SCREAMING_SNAKE_CASE )
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : str = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Dict = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Optional[int] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
def a__ ( self ) -> int:
    # with apply_ocr = True
UpperCAmelCase_ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase_ : str = load_dataset('''hf-internal-testing/fixtures_docvqa''' ,split='''test''' )
UpperCAmelCase_ : Tuple = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
UpperCAmelCase_ : Tuple = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ : List[str] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCAmelCase_ : Optional[int] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes ,_SCREAMING_SNAKE_CASE )
    # with apply_ocr = False
UpperCAmelCase_ : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) ) | 30 |
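# A minimal sketch of the resize step the shape assertions above exercise, using only
# PIL and numpy: any input image ends up as a (1, 3, 224, 224) float array. The
# normalization constants are deliberately omitted; only the geometry is shown here.
import numpy as np
from PIL import Image
def to_pixel_values(image , size = 224 ):
    resized = image.convert('''RGB''' ).resize((size, size) )
    array = np.asarray(resized ).astype(np.float32 ) / 255.0  # HWC in [0, 1]
    return array.transpose(2 , 0 , 1 )[None]                  # -> (1, 3, size, size)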
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 1 |
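# A minimal sketch of the register-then-load pattern several tests above exercise.
# MyConfig and MyTokenizer are hypothetical stand-ins for a user-defined pair; only
# the registration calls themselves are the library's real API.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig
class MyConfig(PretrainedConfig ):
    model_type = '''my-model'''
class MyTokenizer(BertTokenizer ):
    pass
AutoConfig.register('''my-model''' , MyConfig )
AutoTokenizer.register(MyConfig , slow_tokenizer_class=MyTokenizer )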
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator ( rotpos: RotorPositionT , rotsel: RotorSelectionT , pb: str ):
    '''simple docstring'''
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = f'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(msg )
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc ):
        msg = f'''First rotor position is not within range of 1..26 ({rotorpos1})'''
        raise ValueError(msg )
    if not 0 < rotorpos2 <= len(abc ):
        msg = f'''Second rotor position is not within range of 1..26 ({rotorpos2})'''
        raise ValueError(msg )
    if not 0 < rotorpos3 <= len(abc ):
        msg = f'''Third rotor position is not within range of 1..26 ({rotorpos3})'''
        raise ValueError(msg )
    # Validates string and returns dict
    pbdict = _plugboard(pb )
    return rotpos, rotsel, pbdict
def _plugboard ( pbstring: str ):
    '''simple docstring'''
    if not isinstance(pbstring , str ):
        msg = f'''Plugboard setting isn\'t type string ({type(pbstring )})'''
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = f'''Odd number of symbols ({len(pbstring )})'''
        raise Exception(msg )
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(''' ''' , '''''' )
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f'''\'{i}\' not in list of symbols'''
            raise Exception(msg )
        elif i in tmppbl:
            msg = f'''Duplicate symbol ({i})'''
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma ( text: str , rotor_position: RotorPositionT , rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3) , plugb: str = "" , ):
    '''simple docstring'''
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position , rotor_selection , plugb.upper() )
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol ) + rotorpos1
            symbol = rotor1[index % len(abc )]
            # rotor rb --------------------------
            index = abc.index(symbol ) + rotorpos2
            symbol = rotor2[index % len(abc )]
            # rotor rc --------------------------
            index = abc.index(symbol ) + rotorpos3
            symbol = rotor3[index % len(abc )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor3.index(symbol ) - rotorpos3]
            symbol = abc[rotor2.index(symbol ) - rotorpos2]
            symbol = abc[rotor1.index(symbol ) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc ):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc ):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc ):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')
        result.append(symbol )
    return "".join(result )
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb)) | 30 |
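# A property check of the machine above: the reflector makes the whole mapping an
# involution, so encrypting twice with identical settings returns the plaintext.
# The positions, rotor choice, and plugboard below are illustrative.
demo_pos, demo_rotors, demo_pb = (3, 5, 7), (rotor1, rotor2, rotor3), '''AB'''
demo_ct = enigma('''ATTACKATDAWN''' , demo_pos , demo_rotors , demo_pb )
assert enigma(demo_ct , demo_pos , demo_rotors , demo_pb ) == '''ATTACKATDAWN'''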
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution ( n = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 1 |
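# An equivalent formulation of the 13-digit window product using math.prod (3.8+)
# instead of functools.reduce over strings; it should agree with solution() above.
import math
def largest_window_product(digits = N , span = 13 ):
    return max(math.prod(int(d ) for d in digits[i : i + span] ) for i in range(len(digits ) - span + 1 ) )
assert largest_window_product() == solution()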
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__a = 16
__a = 32
def lowerCamelCase__ ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowercase )
UpperCAmelCase_ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_lowercase )
UpperCAmelCase_ : Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
UpperCAmelCase_ : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
UpperCAmelCase_ : Union[str, Any] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : int = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : Optional[Any] = config['''lr''']
UpperCAmelCase_ : Union[str, Any] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[Any] = int(config['''seed'''] )
UpperCAmelCase_ : int = int(config['''batch_size'''] )
UpperCAmelCase_ : List[str] = args.model_name_or_path
set_seed(_lowercase )
UpperCAmelCase_, UpperCAmelCase_ : int = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
UpperCAmelCase_ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : str = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[Any] = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : List[str] = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
UpperCAmelCase_ : List[str] = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : str = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ : List[str] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : Union[str, Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Dict = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(_lowercase ) + 1
UpperCAmelCase_ : List[str] = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.print('''resumed checkpoint performance:''' , _lowercase )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
UpperCAmelCase_ : Any = json.load(_lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : Union[str, Any] = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
UpperCAmelCase_ : List[str] = model(**_lowercase )
UpperCAmelCase_ : int = outputs.loss
UpperCAmelCase_ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : str = f'''epoch_{epoch}'''
UpperCAmelCase_ : Optional[Any] = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
UpperCAmelCase_ : Any = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
UpperCAmelCase_ : Union[str, Any] = accuracy
UpperCAmelCase_ : List[str] = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[Any] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Any = epoch
UpperCAmelCase_ : Any = overall_step
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
UpperCAmelCase_ : Dict = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main() | 30 |
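# A minimal sketch of the checkpoint round trip the script above performs once per
# epoch: save_state() persists model, optimizer, scheduler and RNG state to a folder,
# and load_state() restores them on resume. The folder name is an illustrative assumption.
from accelerate import Accelerator
def checkpoint_roundtrip(model , optimizer , dataloader ):
    accelerator = Accelerator()
    model, optimizer, dataloader = accelerator.prepare(model , optimizer , dataloader )
    accelerator.save_state('''checkpoints/epoch_0''' )  # hypothetical output folder
    accelerator.load_state('''checkpoints/epoch_0''' )  # a later run resumes from it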
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi ( precision ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 1 |
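# A quick sanity check of pi() above: for a modest precision the leading digits must
# agree with math.pi. The precision value 8 is illustrative.
import math
assert pi(8 ).startswith('''3.141592''' )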
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase__ ( monkeypatch , dataset_size , input_in_memory_max_size ):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected | 30 |
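# The rule exercised above, stated in one line: a dataset counts as "small" only when
# both sizes are truthy and the dataset fits under the configured in-memory cap.
def is_small(dataset_size , in_memory_max_size ):
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size )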
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad ( x , y , z , scale , distance ):
    '''simple docstring'''
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate ( x , y , z , axis , angle ):
    '''simple docstring'''
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 1 |
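# Perspective intuition for convert_to_ad: with scale fixed, a point farther from the
# camera (larger z) projects closer to the origin. Inputs below are illustrative.
near_x, _ = convert_to_ad(1.0 , 1.0 , 1.0 , 1.0 , 10.0 )
far_x, _ = convert_to_ad(1.0 , 1.0 , 9.0 , 1.0 , 10.0 )
assert near_x > far_x  # same x shrinks on screen as z grows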
def solution ( max_base = 10 , max_power = 22 ):
    '''simple docstring'''
    powers = range(1 , max_power )
    bases = range(1 , max_base )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""") | 30 |
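# Why the 10/22 bounds above are exhaustive: 10**p always has p + 1 digits, so only
# single-digit bases qualify, and even base 9 stops producing p-digit p-th powers
# once p exceeds 21.
assert len(str(9 ** 21 ) ) == 21
assert len(str(9 ** 22 ) ) < 22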
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__a = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__a = concatenate_datasets
__a = DownloadConfig
__a = DownloadManager
__a = DownloadMode
__a = DownloadConfig
__a = DownloadMode
__a = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
    _lowercase = re.sub('''<n>''' , '''''' , _lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowercase ) ) | 30 |
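# What the helper above computes, shown with nltk directly (assumes the punkt data
# downloaded at import time is available): one sentence per newline-joined line.
example = "Pegasus is mythical. It is pure white."
assert "\n".join(nltk.sent_tokenize(example ) ) == "Pegasus is mythical.\nIt is pure white."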
def gcd ( a , b ):
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse ( a , m ):
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 30 | 1 |
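# Cross-check of the extended-Euclid result against Python's built-in modular inverse
# (3.8+): both give 15 for a = 7, m = 26, since 7 * 15 = 105 ≡ 1 (mod 26). The name
# find_mod_inverse is the restored name used above.
assert find_mod_inverse(7 , 26 ) == pow(7 , -1 , 26 ) == 15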
from heapq import heappop, heappush
import numpy as np
def dijkstra ( grid , source , destination , allow_diagonal , ):
    '''simple docstring'''
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 |
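# A quick illustration of dijkstra() above (the restored name) on a fully open 2x2
# grid, where 1 marks a passable cell: the shortest 4-connected path from (0, 0) to
# (1, 1) costs 2 moves.
demo_grid = np.array([[1, 1], [1, 1]] )
demo_dist, demo_path = dijkstra(demo_grid , (0, 0) , (1, 1) , allow_diagonal=False )
assert demo_dist == 2 and demo_path[0] == (0, 0) and demo_path[-1] == (1, 1)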
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase = '''image_segmenter'''
lowerCAmelCase = CLIPSegForImageSegmentation
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''image''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
with torch.no_grad():
UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
    def a__ ( self ,outputs ) -> Dict:
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) ) | 30 | 1 |
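# A minimal sketch of driving CLIPSeg directly, outside the tool wrapper above. The
# checkpoint id matches the one the tool declares; the image path and the 0.5
# sigmoid threshold are illustrative assumptions.
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
processor = CLIPSegProcessor.from_pretrained('''CIDAS/clipseg-rd64-refined''' )
model = CLIPSegForImageSegmentation.from_pretrained('''CIDAS/clipseg-rd64-refined''' )
inputs = processor(text=['''a cat'''] , images=[Image.open('''photo.jpg''' )] , padding=True , return_tensors='''pt''' )
with torch.no_grad():
    mask_logits = model(**inputs ).logits
binary_mask = mask_logits.sigmoid() > 0.5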
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = 1.5
UpperCAmelCase_ : Union[str, Any] = int(factor * num_class_images )
UpperCAmelCase_ : Optional[Any] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_lowercase , aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''' , exist_ok=_lowercase )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
UpperCAmelCase_ : List[Any] = client.query(text=_lowercase )
if len(_lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
UpperCAmelCase_ : List[str] = int(factor * num_images )
UpperCAmelCase_ : Optional[int] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_lowercase , aesthetic_weight=0.1 , )
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : List[Any] = tqdm(desc='''downloading real regularization images''' , total=_lowercase )
with open(f'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(f'''{class_data_dir}/urls.txt''' , '''w''' ) as fa, open(
f'''{class_data_dir}/images.txt''' , '''w''' ) as fa:
while total < num_class_images:
UpperCAmelCase_ : Tuple = class_images[count]
count += 1
try:
UpperCAmelCase_ : Optional[Any] = requests.get(images['''url'''] )
if img.status_code == 200:
UpperCAmelCase_ : Dict = Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f:
f.write(img.content )
                    f_caption.write(images['''caption'''] + '''\n''' )
                    f_url.write(images['''url'''] + '''\n''' )
                    f_image.write(f'''{class_data_dir}/images/{total}.jpg''' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = argparse.ArgumentParser('''''' , add_help=_lowercase )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=_lowercase , type=_lowercase )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=_lowercase , type=_lowercase )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=_lowercase )
return parser.parse_args()
if __name__ == "__main__":
__a = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 30 |
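# A compact sketch of the grow-until-enough retry pattern used in the download
# loop above, decoupled from clip_retrieval; `query_fn` stands in for
# `client.query` (the real loop also rebuilds the client with the new count).
def query_with_growth(query_fn, num_class_images, factor=1.5, cap=1E4):
    num_images = int(factor * num_class_images)
    while True:
        results = query_fn(num_images)
        if len(results) >= factor * num_class_images or num_images > cap:
            return results
        num_images = int(factor * num_images)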
import numpy as np
import datasets
__a = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a = '\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
} ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
# convert to numpy arrays
UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.cov(reference_distribution.T )
try:
UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 30 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a = logging.get_logger(__name__)
__a = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = '''nat'''
lowerCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=[3, 4, 6, 5] ,_SCREAMING_SNAKE_CASE=[2, 4, 8, 16] ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3.0 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[Any] = embed_dim
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = num_heads
UpperCAmelCase_ : Optional[int] = kernel_size
UpperCAmelCase_ : Any = mlp_ratio
UpperCAmelCase_ : Optional[Any] = qkv_bias
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Any = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ : List[Any] = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) )
UpperCAmelCase_ : Dict = layer_scale_init_value
UpperCAmelCase_ : Any = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 ,len(_SCREAMING_SNAKE_CASE ) + 1 )]
UpperCAmelCase_, UpperCAmelCase_ : Dict = get_aligned_output_features_output_indices(
out_features=_SCREAMING_SNAKE_CASE ,out_indices=_SCREAMING_SNAKE_CASE ,stage_names=self.stage_names ) | 30 |
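# Quick check of the hidden_size rule noted in the comment above: with the
# default embed_dim=64 and four stages, the channel dimension after the last
# stage is 64 * 2**3 = 512.
embed_dim, num_stages = 64, 4
assert int(embed_dim * 2 ** (num_stages - 1)) == 512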
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__a = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n                Controls the maximum length to use by one of the truncation/padding parameters.\n\n                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n                is required by one of the truncation/padding parameters. If the model has no specific maximum input\n                length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n                If set, will return tensors instead of list of python integers. Acceptable values are:\n\n                - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n                - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n                - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(_a )
class __a:
"""simple docstring"""
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
elif titles is None or texts is None:
UpperCAmelCase_ : List[str] = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles]
UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts]
UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
                f'''There should be as many titles as texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' )
UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
UpperCAmelCase_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_ : Dict = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = reader_input['''input_ids''']
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3]
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ )
UpperCAmelCase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
UpperCAmelCase_ : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask'''] | 30 | 1 |
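# The attention-mask construction from __call__ above, in isolation: every
# position that is not the pad token gets a 1 (pad_token_id=0 is only an
# illustrative assumption; the tokenizer supplies the real value).
def build_attention_mask(batch_input_ids, pad_token_id=0):
    return [[int(tok != pad_token_id) for tok in ids] for ids in batch_input_ids]
print(build_attention_mask([[101, 2054, 2003, 102, 0, 0]]))  # -> [[1, 1, 1, 1, 0, 0]]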
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=0.2 ,_SCREAMING_SNAKE_CASE=0.2 ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : int = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[Any] = conva_get[:2]
UpperCAmelCase_ : Optional[Any] = conva_get[2]
UpperCAmelCase_ : Dict = size_pa
UpperCAmelCase_ : Dict = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> int:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_SCREAMING_SNAKE_CASE ,'''wb''' ) as f:
pickle.dump(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
print(f'''Model saved: {save_path}''' )
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ) -> Tuple:
# read saved model
with open(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Tuple = pickle.load(_SCREAMING_SNAKE_CASE ) # noqa: S301
UpperCAmelCase_ : Optional[Any] = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
UpperCAmelCase_ : List[Any] = model_dic.get('''size_pooling1''' )
UpperCAmelCase_ : Union[str, Any] = model_dic.get('''num_bp1''' )
UpperCAmelCase_ : List[str] = model_dic.get('''num_bp2''' )
UpperCAmelCase_ : Tuple = model_dic.get('''num_bp3''' )
UpperCAmelCase_ : Union[str, Any] = model_dic.get('''rate_weight''' )
UpperCAmelCase_ : Optional[Any] = model_dic.get('''rate_thre''' )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        # restore the saved model parameters
UpperCAmelCase_ : str = model_dic.get('''w_conv1''' )
UpperCAmelCase_ : Any = model_dic.get('''wkj''' )
UpperCAmelCase_ : Any = model_dic.get('''vji''' )
UpperCAmelCase_ : Any = model_dic.get('''thre_conv1''' )
UpperCAmelCase_ : List[Any] = model_dic.get('''thre_bp2''' )
UpperCAmelCase_ : int = model_dic.get('''thre_bp3''' )
return conv_ins
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
return 1 / (1 + np.exp(-1 * x ))
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
return round(_SCREAMING_SNAKE_CASE ,3 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
# convolution process
UpperCAmelCase_ : int = convs[0]
UpperCAmelCase_ : Optional[int] = convs[1]
UpperCAmelCase_ : Any = np.shape(_SCREAMING_SNAKE_CASE )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,_SCREAMING_SNAKE_CASE ):
for j_focus in range(0 ,size_data - size_conv + 1 ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_SCREAMING_SNAKE_CASE )
        # calculate the feature map of every single kernel, and save it as a list of matrices
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Any = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[Any] = []
for i_focus in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Any = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Optional[Any] = np.asmatrix(_SCREAMING_SNAKE_CASE ).reshape(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
data_featuremap.append(_SCREAMING_SNAKE_CASE )
        # expand the data slice to one dimension
UpperCAmelCase_ : str = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : List[str] = np.asarray(_SCREAMING_SNAKE_CASE )
return focus_list, data_featuremap
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="average_pool" ) -> Any:
# pooling process
UpperCAmelCase_ : Dict = len(featuremaps[0] )
UpperCAmelCase_ : Optional[Any] = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[Any] = []
for i_map in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Optional[Any] = featuremaps[i_map]
UpperCAmelCase_ : int = []
for i_focus in range(0 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
for j_focus in range(0 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_SCREAMING_SNAKE_CASE ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Tuple = np.asmatrix(_SCREAMING_SNAKE_CASE ).reshape(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
featuremap_pooled.append(_SCREAMING_SNAKE_CASE )
return featuremap_pooled
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
        # expand three-dimensional data into a one-dimensional list
UpperCAmelCase_ : Dict = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Optional[Any] = np.shape(data[i] )
UpperCAmelCase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = np.asarray(_SCREAMING_SNAKE_CASE )
return data_expanded
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
        # expand a matrix into a one-dimensional list
UpperCAmelCase_ : List[str] = np.asarray(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.shape(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : str = 0
for i_map in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = np.ones((size_map, size_map) )
for i in range(0 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
for j in range(0 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = pd_pool[
i_pool
]
UpperCAmelCase_ : int = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
_SCREAMING_SNAKE_CASE ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(_SCREAMING_SNAKE_CASE )
return pd_all
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=bool ) -> str:
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_SCREAMING_SNAKE_CASE )) )
print((''' - - Shape: Teach_Data ''', np.shape(_SCREAMING_SNAKE_CASE )) )
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Union[str, Any] = 10_000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : Dict = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(_SCREAMING_SNAKE_CASE ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : Optional[int] = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Tuple = np.asarray(datas_teach[p] )
UpperCAmelCase_, UpperCAmelCase_ : str = self.convolute(
_SCREAMING_SNAKE_CASE ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(_SCREAMING_SNAKE_CASE ,self.size_poolinga )
UpperCAmelCase_ : Optional[int] = np.shape(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self._expand(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = data_bp_input
UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : Union[str, Any] = self.sig(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.dot(_SCREAMING_SNAKE_CASE ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Dict = self.sig(_SCREAMING_SNAKE_CASE )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(_SCREAMING_SNAKE_CASE ,(1 - bp_outa) ) )
UpperCAmelCase_ : str = np.multiply(
np.dot(_SCREAMING_SNAKE_CASE ,self.wkj ) ,np.multiply(_SCREAMING_SNAKE_CASE ,(1 - bp_outa) ) )
UpperCAmelCase_ : str = np.dot(_SCREAMING_SNAKE_CASE ,self.vji )
UpperCAmelCase_ : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[Any] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : int = self._calculate_gradient_from_pool(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : int = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : str = self.rate_weight * np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
UpperCAmelCase_ : str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the total error for this single image
UpperCAmelCase_ : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : Tuple = rp + 1
UpperCAmelCase_ : List[str] = error_count / patterns
all_mse.append(_SCREAMING_SNAKE_CASE )
def draw_error():
UpperCAmelCase_ : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_SCREAMING_SNAKE_CASE ,'''+-''' )
plt.plot(_SCREAMING_SNAKE_CASE ,'''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_SCREAMING_SNAKE_CASE ,alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# model predict
UpperCAmelCase_ : str = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_SCREAMING_SNAKE_CASE )) )
for p in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : List[str] = np.asmatrix(datas_test[p] )
UpperCAmelCase_, UpperCAmelCase_ : Tuple = self.convolute(
_SCREAMING_SNAKE_CASE ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(_SCREAMING_SNAKE_CASE ,self.size_poolinga )
UpperCAmelCase_ : Tuple = self._expand(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : List[Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Dict = self.sig(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[str] = self.sig(_SCREAMING_SNAKE_CASE )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : List[str] = [list(map(self.do_round ,_SCREAMING_SNAKE_CASE ) ) for each in produce_out]
return np.asarray(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        # return the image data after the convolution process so it can be inspected
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_ : Tuple = self.convolute(
_SCREAMING_SNAKE_CASE ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : str = self.pooling(_SCREAMING_SNAKE_CASE ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass | 30 |
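# A standalone numpy sketch of the average-pooling branch of the pooling
# routine above, for a single square feature map (names are illustrative):
import numpy as np
def average_pool(feature_map: np.ndarray, size: int) -> np.ndarray:
    n = feature_map.shape[0] // size
    blocks = feature_map[: n * size, : n * size].reshape(n, size, n, size)
    return blocks.mean(axis=(1, 3))
print(average_pool(np.arange(16).reshape(4, 4), 2))
# -> [[ 2.5  4.5]
#     [10.5 12.5]]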
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 1 |
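# A minimal sketch of the lazy-import idea behind _LazyModule: attribute access
# triggers the real submodule import. This illustrates the pattern only and is
# not the actual transformers implementation.
import importlib
import types
class LazySubmodule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)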
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''dandelin/vilt-b32-finetuned-vqa'''
lowerCAmelCase = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
lowerCAmelCase = '''image_qa'''
lowerCAmelCase = AutoProcessor
lowerCAmelCase = AutoModelForVisualQuestionAnswering
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''text''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return self.pre_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Tuple:
with torch.no_grad():
return self.model(**_SCREAMING_SNAKE_CASE ).logits
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx] | 30 |
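# The decode step above in isolation: take the argmax over the answer logits and
# map it through the model's id-to-label table (the labels here are hypothetical).
import torch
logits = torch.tensor([[0.1, 2.3, -0.5]])
id2label = {0: "no", 1: "yes", 2: "maybe"}
print(id2label[logits.argmax(-1).item()])  # -> "yes"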
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''wav2vec2'''
def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = feat_extract_norm
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = conv_bias
UpperCAmelCase_ : str = num_conv_pos_embeddings
UpperCAmelCase_ : Any = num_conv_pos_embedding_groups
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : str = hidden_dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = activation_dropout
UpperCAmelCase_ : List[str] = feat_proj_dropout
UpperCAmelCase_ : int = final_dropout
UpperCAmelCase_ : Union[str, Any] = layerdrop
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Optional[int] = do_stable_layer_norm
UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase_ : Any = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : List[str] = feat_quantizer_dropout
UpperCAmelCase_ : Dict = num_negatives
UpperCAmelCase_ : List[str] = codevector_dim
UpperCAmelCase_ : List[str] = proj_codevector_dim
UpperCAmelCase_ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : List[Any] = num_adapter_layers
UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Optional[int] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = xvector_output_dim
@property
def a__ ( self ) -> Any:
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 30 | 1 |
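# The property above reduces the conv strides to their product, i.e. how many
# raw audio samples collapse into one frame of encoder output; with the default
# strides this is 5 * 2**6 = 320.
import functools
import operator
conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # -> 320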
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
debug_launcher(test_script.main )
def a__ ( self ) -> Union[str, Any]:
debug_launcher(test_ops.main ) | 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 1 |
from functools import reduce
__a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase__ ( _lowercase = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _lowercase , _lowercase : str(int(_lowercase ) * int(_lowercase ) ) , n[i : i + 13] ) )
for i in range(len(_lowercase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict:
UpperCAmelCase_ : List[Any] = tokenizer
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
UpperCAmelCase_ : Optional[int] = n_copies
def __iter__( self ) -> Any:
UpperCAmelCase_ : List[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : str = start_length
UpperCAmelCase_ : Optional[int] = eof_strings
UpperCAmelCase_ : str = tokenizer
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1]
UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase )
UpperCAmelCase_ : Dict = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase )
UpperCAmelCase_ : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCAmelCase_ : List[Any] = '''false'''
if args.num_workers is None:
UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count()
    # Use load_dataset so the data can be fed to accelerate
UpperCAmelCase_ : int = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ : Any = tokenizer.eos_token
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase_ : str = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' )
UpperCAmelCase_ : Dict = load_metric('''code_eval''' )
UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
UpperCAmelCase_ : str = args.n_samples // args.batch_size
UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase )
    # note: args.batch_size is actually num_return_sequences, not the dataloader batch size
UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase )
UpperCAmelCase_ : int = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
UpperCAmelCase_ : Any = []
for task in tqdm(range(_lowercase ) ):
UpperCAmelCase_ : int = human_eval['''test'''][task]['''test''']
UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(_lowercase , _lowercase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 1 |
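# The EOF-marker splitting used by remove_last_block above, restated in
# isolation: split the generation on the markers and drop everything from the
# last marker onward, so a trailing unfinished block is discarded.
import re
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
def remove_last_block(code: str) -> str:
    parts = re.split("(%s)" % "|".join(EOF_STRINGS), code)
    return "".join(parts[:-2])
print(remove_last_block("    return x + 1\nprint('done')"))  # -> "    return x + 1"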
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
if not (isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )):
raise ValueError('''longest_common_substring() takes two strings for inputs''' )
UpperCAmelCase_ : Optional[Any] = len(_lowercase )
UpperCAmelCase_ : Optional[Any] = len(_lowercase )
UpperCAmelCase_ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Tuple = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
UpperCAmelCase_ : Dict = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
UpperCAmelCase_ : Any = i
UpperCAmelCase_ : Optional[Any] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 |
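# A compact standalone check of the recurrence used above: dp[i][j] counts the
# common suffix length of text1[:i] and text2[:j], and the best cell gives the
# answer. Only the length is tracked here to keep the sketch short.
def lcs_substring_length(text1: str, text2: str) -> int:
    dp = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]
    best = 0
    for i in range(1, len(text1) + 1):
        for j in range(1, len(text2) + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                best = max(best, dp[i][j])
    return best
print(lcs_substring_length("abcdxyz", "xyzabcd"))  # -> 4, the length of "abcd"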
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''imagegpt'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : Union[str, Any] = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[Any] = n_head
UpperCAmelCase_ : Union[str, Any] = n_inner
UpperCAmelCase_ : List[Any] = activation_function
UpperCAmelCase_ : List[str] = resid_pdrop
UpperCAmelCase_ : str = embd_pdrop
UpperCAmelCase_ : Optional[Any] = attn_pdrop
UpperCAmelCase_ : Dict = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Dict = scale_attn_weights
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ : Tuple = reorder_and_upcast_attn
UpperCAmelCase_ : int = tie_word_embeddings
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
class __a( _a ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs | 30 | 1 |
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def lowerCamelCase__ ( ):
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 30 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__a = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__a = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , key )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
def get_dtype_size( dtype ):
    '''simple docstring'''
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
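# --- Editor note: get_dtype_size returns the per-element width in bytes, which
# the sharding logic below uses to accumulate shard sizes. Assuming torch dtypes:
#   get_dtype_size(torch.float16) == 2
#   get_dtype_size(torch.bool) == 1 / 8   # bools are counted as a single bit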
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for i, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 1 |
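# --- Illustrative sketch (editor addition): how the tensor-parallel merge above
# behaves on toy tensors. Norm weights and biases are averaged across ranks,
# while parallel linear weights are concatenated (dim 1 for row-parallel layers,
# dim 0 otherwise). Shapes and values are made up purely for demonstration.
def _demo_tp_merge():
    import torch
    shards = [torch.ones(2, 4), 3 * torch.ones(2, 4)]  # pretend TP ranks 0 and 1
    averaged = sum(shards) / len(shards)        # layer-norm style: every entry 2.0
    row_parallel = torch.cat(shards, dim=1)     # shape (2, 8)
    column_parallel = torch.cat(shards, dim=0)  # shape (4, 4)
    return averaged, row_parallel, column_parallel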
def remove_duplicates( key ):
    '''simple docstring'''
    key_no_dups = ''''''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key ):
    '''simple docstring'''
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message , cipher_map ):
    '''simple docstring'''
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message , cipher_map ):
    '''simple docstring'''
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main( ):
    '''simple docstring'''
    message = input('''Enter message to encode or decode: ''' ).strip()
    key = input('''Enter keyword: ''' ).strip()
    option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 30 |
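# --- Illustrative usage (editor addition), exercising the functions above; the
# expected outputs come from the algorithm's original doctests:
#   cipher_map = create_cipher_map("Goodbye!!")
#   encipher("Hello World!!", cipher_map)   # -> 'CYJJM VMQJB!!'
#   decipher("CYJJM VMQJB!!", cipher_map)   # -> 'HELLO WORLD!!'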
def solution( ):
    '''simple docstring'''
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution()) | 30 | 1 |
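# --- Editor note (addition): only the last ten digits matter, so the sum can be
# reduced modulo 10**10 with three-argument pow, keeping every intermediate value
# small. A sketch of the equivalent modular variant (zfill preserves any leading
# zeros, matching the [-10:] slice above):
def solution_mod():
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)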
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 30 |
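# --- Illustrative invocation (editor addition); the script name, checkpoint and
# output paths are placeholders, not part of the original file:
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim \
#       --extract_ema \
#       --dump_path ./stable-diffusion-v1-5-diffusers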
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def list_image_compression_formats( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes( image ):
    '''simple docstring'''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image( image ):
    '''simple docstring'''
    if hasattr(image , '''filename''' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array( array ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('''|u1''' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts( objs ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    if objs:
        _, non_null_value = first_non_null_value(objs )
        if isinstance(non_null_value , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(non_null_value , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(non_null_value , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
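# --- Illustrative usage (editor addition): how the Image feature above is
# typically attached to a dataset so that path strings decode lazily into PIL
# images. The file path is a hypothetical placeholder.
def _demo_image_feature():
    from datasets import Dataset, Features, Image
    ds = Dataset.from_dict(
        {"image": ["path/to/cat.png"]},
        features=Features({"image": Image()}),
    )
    return ds[0]["image"]  # decoded into a PIL.Image.Image on access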
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = KandinskyInpaintPipeline
lowerCAmelCase = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
lowerCAmelCase = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
lowerCAmelCase = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase = False
@property
def a__ ( self ) -> Tuple:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[Any]:
return self.time_input_dim
@property
def a__ ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> Union[str, Any]:
return 100
@property
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def a__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_005 ,)
UpperCAmelCase_ : Tuple = MultilingualCLIP(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = text_encoder.eval()
return text_encoder
@property
def a__ ( self ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase_ : Any = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def a__ ( self ) -> str:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> str:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : str = self.dummy_text_encoder
UpperCAmelCase_ : Optional[int] = self.dummy_tokenizer
UpperCAmelCase_ : Any = self.dummy_unet
UpperCAmelCase_ : List[Any] = self.dummy_movq
UpperCAmelCase_ : Any = DDIMScheduler(
num_train_timesteps=1_000 ,beta_schedule='''linear''' ,beta_start=0.0_00_85 ,beta_end=0.0_12 ,clip_sample=_SCREAMING_SNAKE_CASE ,set_alpha_to_one=_SCREAMING_SNAKE_CASE ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : int = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_SCREAMING_SNAKE_CASE )
# create init_image
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        UpperCAmelCase_ : Dict = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
        UpperCAmelCase_ : List[str] = np.ones((64, 64) ,dtype=np.float32 )
UpperCAmelCase_ : Union[str, Any] = 0
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def a__ ( self ) -> Any:
UpperCAmelCase_ : List[str] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Union[str, Any] = output.images
UpperCAmelCase_ : Dict = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Union[str, Any] = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def a__ ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        UpperCAmelCase_ : Optional[Any] = np.ones((768, 768) ,dtype=np.float32 )
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : List[Any] = '''a hat'''
UpperCAmelCase_ : Tuple = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' ,torch_dtype=torch.float16 )
pipe_prior.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' ,torch_dtype=torch.float16 )
UpperCAmelCase_ : Tuple = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_, UpperCAmelCase_ : Dict = pipe_prior(
_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
UpperCAmelCase_ : Tuple = pipeline(
_SCREAMING_SNAKE_CASE ,image=_SCREAMING_SNAKE_CASE ,mask_image=_SCREAMING_SNAKE_CASE ,image_embeds=_SCREAMING_SNAKE_CASE ,negative_image_embeds=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,num_inference_steps=100 ,height=768 ,width=768 ,output_type='''np''' ,)
UpperCAmelCase_ : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) | 30 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 | 1 |
import enum
import shutil
import sys
TERMINAL_WIDTH ,TERMINAL_HEIGHT = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class __a( enum.Enum ):
"""simple docstring"""
lowerCAmelCase = 0
lowerCAmelCase = 1
def lowerCamelCase__ ( _lowercase , _lowercase="" ):
'''simple docstring'''
sys.stdout.write(str(_lowercase ) + end )
sys.stdout.flush()
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase="" ):
'''simple docstring'''
forceWrite(f'''\u001b[{color}m{content}\u001b[0m''' , _lowercase )
def lowerCamelCase__ ( ):
'''simple docstring'''
forceWrite('''\r''' )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def lowerCamelCase__ ( ):
'''simple docstring'''
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def lowerCamelCase__ ( ):
'''simple docstring'''
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH ) | 30 |
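# --- Illustrative usage (editor addition), assuming the helpers above:
def _demo_highlight_line():
    writeColor("-> selected option", 32)  # 32 is the ANSI code for green
    forceWrite("\n")
    move_cursor(1, "UP")   # jump back up one line...
    clear_line()           # ...and blank it out again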
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 | 1 |
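# --- Editor note: migration is a drop-in rename; a minimal sketch (the
# checkpoint name is only an example):
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")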
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
"""simple docstring"""
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : Tuple = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> None:
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCAmelCase_ : List[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
UpperCAmelCase_ : str = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : Dict = od / '''test_results.txt'''
UpperCAmelCase_ : Union[str, Any] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCAmelCase_ : int = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ,'''a+''' ) as writer:
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : Dict = metrics[key]
if isinstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ):
UpperCAmelCase_ : Any = val.item()
UpperCAmelCase_ : Any = f'''{key}: {val:.6f}\n'''
writer.write(_SCREAMING_SNAKE_CASE )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : Union[str, Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
try:
UpperCAmelCase_ : Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Tuple = pl_module.model.num_parameters()
UpperCAmelCase_ : Optional[int] = count_trainable_parameters(_SCREAMING_SNAKE_CASE )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,'''test''' )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 30 |
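# --- Illustrative wiring (editor addition): how the callbacks above are
# typically handed to a Trainer. All argument values are hypothetical examples.
def _build_trainer(output_dir, metric="rouge2", patience=3):
    return pl.Trainer(
        callbacks=[
            get_checkpoint_callback(output_dir, metric),
            get_early_stopping_callback(metric, patience),
            Seq2SeqLoggingCallback(),
        ]
    )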
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : List[str] = crop_pct
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Any = do_normalize
UpperCAmelCase_ : str = image_mean
UpperCAmelCase_ : List[Any] = image_std
def a__ ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 30 | 1 |
def split( string , separator = " " ):
    '''simple docstring'''
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod() | 30 |
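# --- Editor note: unlike str.split, the function above drops a trailing empty
# field, e.g. split("a,b,", ",") returns ['a', 'b'] whereas "a,b,".split(",")
# returns ['a', 'b', ''].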
import unittest
import numpy as np
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ):
    '''simple docstring'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b
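# --- Editor note (addition): for the block matrix [[A, B], [B.T, C]], the Schur
# complement S = C - B.T @ A^{-1} @ B satisfies
#   det([[A, B], [B.T, C]]) = det(A) * det(S),
# which is exactly the identity the determinant test below verifies.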
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] )
UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s )
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> None:
UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__a = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''albert'''
def __init__( self ,_SCREAMING_SNAKE_CASE=30_000 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=16_384 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE="gelu_new" ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-12 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="absolute" ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,**_SCREAMING_SNAKE_CASE ,) -> Any:
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = embedding_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_hidden_groups
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Dict = inner_group_num
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : Optional[int] = classifier_dropout_prob
UpperCAmelCase_ : Any = position_embedding_type
class __a( _a ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] ) | 30 |
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode( data ):
    '''simple docstring'''
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = ''''''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = B'''=''' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = B''''''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode( encoded_data ):
    '''simple docstring'''
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            '''argument should be a bytes-like object or ASCII string, '''
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode('''utf-8''' )
        except UnicodeDecodeError:
            raise ValueError('''base64 encoded data should only contain ASCII characters''' )
    padding = encoded_data.count('''=''' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 1 |
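# --- Illustrative check (editor addition): the implementations above mirror the
# standard library, so they can be sanity-checked against it:
def _demo_base64_roundtrip():
    import base64
    data = b"Hello World!"
    assert base64_encode(data) == base64.b64encode(data)
    assert base64_decode(base64_encode(data)) == data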
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self ,**kwargs ) -> int:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop('''tpu_name''' ,self.tpu_name )
        self.device_idx = kwargs.pop('''device_idx''' ,self.device_idx )
        self.eager_mode = kwargs.pop('''eager_mode''' ,self.eager_mode )
        self.use_xla = kwargs.pop('''use_xla''' ,self.use_xla )
        super().__init__(**kwargs )
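    # Hedged sketch of the deprecated-flag remapping performed in __init__ above,
    # shown in isolation (the standalone helper form is illustrative, not part of
    # the source):
    #
    #   def remap_deprecated_args(kwargs, deprecated_args):
    #       for deprecated_arg in deprecated_args:
    #           if deprecated_arg in kwargs:
    #               positive_arg = deprecated_arg[3:]  # drop the "no_" prefix
    #               kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    #       return kwargs
    #
    #   remap_deprecated_args({"no_cuda": True}, ["no_cuda"])  # -> {"cuda": False}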
    tpu_name = field(
default=_a , metadata={'''help''': '''Name of TPU'''} , )
    device_idx = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    eager_mode = field(default=_a , metadata={'''help''': '''Benchmark models in eager mode.'''} )
    use_xla = field(
default=_a , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
    def _setup_tpu( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['''tf'''] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self ,['''tf'''] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi-GPU is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'''GPU''' )
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] ,'''GPU''' )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
        return strategy
@property
    def is_tpu( self ) -> bool:
requires_backends(self ,['''tf'''] )
return self._setup_tpu is not None
@property
def a__ ( self ) -> "tf.distribute.Strategy":
requires_backends(self ,['''tf'''] )
return self._setup_strategy
@property
    def gpu_list( self ) -> Tuple:
requires_backends(self ,['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
    def n_gpu( self ) -> int:
requires_backends(self ,['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ) -> bool:
return self.n_gpu > 0 | 30 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer,
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name , num_frames ):
    '''simple docstring'''
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find('''patch''' )
    patch_size = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
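# Hedged example of the helper above: get_xclip_config("xclip-base-patch32", 8)
# derives patch_size=32 from the model name and builds a base-sized config,
# while "large" checkpoints additionally bump the hidden sizes and set
# projection_dim to 768.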
def rename_key(name ):
    '''simple docstring'''
    if name == "token_embedding.weight":
        name = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
    if name == "positional_embedding":
        name = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
    if "ln_1" in name:
        name = name.replace('''ln_1''' , '''layer_norm1''' )
    if "ln_2" in name:
        name = name.replace('''ln_2''' , '''layer_norm2''' )
    if "c_fc" in name:
        name = name.replace('''c_fc''' , '''fc1''' )
    if "c_proj" in name:
        name = name.replace('''c_proj''' , '''fc2''' )
    if name.startswith('''transformer.resblocks''' ):
        name = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
    if "ln_final" in name:
        name = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
    if name == "visual.positional_embedding":
        name = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
    if name.startswith('''visual.transformer.resblocks''' ):
        name = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
    if "visual.conv1" in name:
        name = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
    if "visual.ln_pre" in name:
        name = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
    if "visual.ln_post" in name:
        name = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
    if "visual.proj" in name:
        name = name.replace('''visual.proj''' , '''visual_projection.weight''' )
    if "text_projection" in name:
        name = name.replace('''text_projection''' , '''text_projection.weight''' )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
    if "prompts_visual_ln" in name:
        name = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('''positional''' , '''position''' )
    if name.startswith('''mit.resblocks''' ):
        name = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
    # prompts generator
    if name.startswith('''prompts_generator.norm''' ):
        name = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
    return name
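# Hedged examples of the mapping implemented above:
#
#   rename_key("visual.conv1.weight")
#     -> "vision_model.embeddings.patch_embedding.weight"
#   rename_key("transformer.resblocks.0.ln_1.weight")
#     -> "text_model.encoder.layers.0.layer_norm1.weight"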
def convert_state_dict(orig_state_dict , config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split('''.''' )
            if key.startswith('''visual''' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ : List[str] = val[
:dim, :
]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Optional[int] = val[
-dim:, :
]
else:
UpperCAmelCase_ : List[Any] = val[
:dim
]
UpperCAmelCase_ : Any = val[
dim : dim * 2
]
UpperCAmelCase_ : List[str] = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ : Optional[int] = val[
:dim, :
]
UpperCAmelCase_ : List[Any] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[str] = val[
-dim:, :
]
else:
UpperCAmelCase_ : List[Any] = val[:dim]
UpperCAmelCase_ : List[Any] = val[
dim : dim * 2
]
UpperCAmelCase_ : Optional[Any] = val[-dim:]
elif key.startswith('''mit''' ):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ : Optional[Any] = val[:dim, :]
UpperCAmelCase_ : Optional[int] = val[dim : dim * 2, :]
UpperCAmelCase_ : Optional[int] = val[-dim:, :]
else:
UpperCAmelCase_ : List[str] = val[:dim]
UpperCAmelCase_ : List[Any] = val[dim : dim * 2]
UpperCAmelCase_ : int = val[-dim:]
else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ : Tuple = val[:dim, :]
UpperCAmelCase_ : Any = val[
dim : dim * 2, :
]
UpperCAmelCase_ : str = val[-dim:, :]
else:
UpperCAmelCase_ : Optional[int] = val[:dim]
UpperCAmelCase_ : Tuple = val[
dim : dim * 2
]
UpperCAmelCase_ : List[Any] = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
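# Hedged sketch of the fused-QKV split performed above: a Megatron-style
# `attn.in_proj` weight of shape (3 * dim, dim) is cut into equal q/k/v chunks
# (the tensor below is illustrative, not from a real checkpoint):
#
#   fused = torch.randn(3 * dim, dim)
#   q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]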
def prepare_video(num_frames ):
    '''simple docstring'''
    if num_frames == 8:
        filename = '''eating_spaghetti_8_frames.npy'''
    elif num_frames == 16:
        filename = '''eating_spaghetti.npy'''
    elif num_frames == 32:
        filename = '''eating_spaghetti_32_frames.npy'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename=filename , repo_type='''dataset''' , )
    video = np.load(file )
    return list(video )
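# Hedged usage note: prepare_video(8) fetches the 8-frame spaghetti clip from
# the hf-internal-testing/spaghetti-video dataset repo (network access required
# on first call) and returns it as a list of per-frame numpy arrays.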
def convert_xclip_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = '''pytorch_model.bin'''
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location='''cpu''' )['''model''']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )['''model''']
    state_dict = convert_state_dict(state_dict , config )
    model = XCLIPModel(config )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    image_size = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=video , return_tensors='''pt''' , padding=True )
    print('''Shape of pixel values:''' , inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print('''Probs:''' , probs )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    assert torch.allclose(probs , expected_probs , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
        processor.push_to_hub(model_name , organization='''nielsr''' )
        slow_tokenizer.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 30 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
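# Hedged check: for the 1000-digit constant above, the greatest product of 13
# adjacent digits is the well-known Project Euler 8 result:
#
#   solution()  # -> 23514624000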
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 1 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'\[([^\]]+)\]')
def get_indent(line ):
    '''simple docstring'''
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    '''simple docstring'''
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('''\n'''.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    '''simple docstring'''
    def _inner(x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects(objects , key=None ):
    '''simple docstring'''
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
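# Hedged example of the ordering implemented above: constants first, then
# classes, then functions, each group alphabetized with underscores ignored:
#
#   sort_objects(["load_model", "CONFIG", "Trainer", "utils"])
#     -> ["CONFIG", "Trainer", "load_model", "utils"]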
def sort_objects_in_import(import_statement ):
    '''simple docstring'''
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f'''[{imports}]'''
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
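# Hedged one-line example (sorted with the same constants/classes/functions
# rule as sort_objects):
#
#   sort_objects_in_import('_import_structure["models"] = ["Bar", "foo", "BAZ"]')
#     -> '_import_structure["models"] = ["BAZ", "Bar", "foo"]'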
def sort_imports(file , check_only=True ):
    '''simple docstring'''
    with open(file , encoding='''utf-8''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f'''Overwriting {file}.''' )
            with open(file , '''w''' , encoding='''utf-8''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(f'''Would overwrite {len(failures )} files, run `make style`.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only) | 30 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
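# Hedged sanity check (digits match the usual decimal expansion of pi):
#
#   pi(10)  # -> '3.14159265'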
if __name__ == "__main__":
    n = 50
    print(F"""The first {n} digits of pi are: {pi(n)}""") | 30 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
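# Hedged usage sketch for the helper defined below (repo and file names are
# illustrative): hf_hub_url("user/my_dataset", "data/train-00000.parquet",
# revision="main") resolves the dataset file URL, percent-encoding the path
# first on huggingface_hub < 0.11.0.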
def hf_hub_url(repo_id , path , revision = None ):
    '''simple docstring'''
    if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='''dataset''' , revision=revision ) | 30 |
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad(x , y , z , scale , distance ):
    '''simple docstring'''
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x , y , z , axis , angle ):
    '''simple docstring'''
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
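# Hedged worked example for the projection above:
#
#   convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0)
#     -> x' = (1 * 10) / (3 + 10) * 10 ≈ 7.6923, y' = (2 * 10) / (3 + 10) * 10 ≈ 15.3846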
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 1 |
def speed_of_sound_in_a_fluid(density , bulk_modulus ):
'''simple docstring'''
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
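# Hedged example with rough literature values for water (density ~998 kg/m^3,
# bulk modulus ~2.15e9 Pa):
#
#   speed_of_sound_in_a_fluid(998, 2.15e9)  # -> ~1467.7 m/s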
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> Dict:
        model = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' ,return_tensors='''tf''' ).input_ids
        labels = tokenizer('''Hi I am''' ,return_tensors='''tf''' ).input_ids
        loss = model(input_ids ,labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 ) | 30 |
def gcd(a , b ):
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b
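# Hedged check for the helpers here: gcd(7, 26) == 1, and the modular inverse
# computed below satisfies find_mod_inverse(7, 26) == 15, since 7 * 15 ≡ 1 (mod 26).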
def find_mod_inverse(a , m ):
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 30 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key , file ):
    '''simple docstring'''
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , file )[1] )
    layer_number -= 3
    return f'''h.{layer_number}.''' + key
def get_dtype_size(dtype ):
    '''simple docstring'''
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
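        # Keep only the TP-rank-0 shard of every layer file ("model_00"); the
        # remaining tensor-parallel ranks are loaded below by rewriting the name.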
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
        for j, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase = '''image_segmenter'''
lowerCAmelCase = CLIPSegForImageSegmentation
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''image''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
with torch.no_grad():
UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
    def a__ ( self ,outputs ) -> "Image.Image":
        array = outputs.cpu().detach().numpy()
        # Binarize the segmentation logits: non-positive values become 0, positive values 1
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) ) | 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
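# Lazy import structure: the submodules listed below are only imported on first
# attribute access via _LazyModule (or eagerly under TYPE_CHECKING so static
# type checkers still see the real symbols).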
__a = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
import numpy as np
import datasets
__a = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a = '\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
} ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
# convert to numpy arrays
UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
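        # Note: this returns the *squared* form (x - mu)^T Sigma^{-1} (x - mu)
        # per row of X (no square root); `pinv` below is the fallback when the
        # covariance matrix is singular and has no exact inverse.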
UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.cov(reference_distribution.T )
try:
UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 30 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowercase , _lowercase=False , _lowercase=False , _lowercase=False ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
UpperCAmelCase_ : List[str] = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : List[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase_ : Tuple = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : Optional[int] = in_proj_bias[-config.hidden_size :]
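# The fused qkv projection above has shape (3 * hidden_size, hidden_size); the
# three row-wise slices peel off the query, key and value weights in that order.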
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = dct.pop(_lowercase )
UpperCAmelCase_ : Dict = val
@torch.no_grad()
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_lowercase )
UpperCAmelCase_ : int = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Dict = False
if "vqa" in checkpoint_url:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : str = 3129
UpperCAmelCase_ : List[str] = '''huggingface/label-files'''
UpperCAmelCase_ : List[Any] = '''vqa2-id2label.json'''
UpperCAmelCase_ : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : int = idalabel
UpperCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Optional[Any] = ViltForQuestionAnswering(_lowercase )
elif "nlvr" in checkpoint_url:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Any = {0: '''False''', 1: '''True'''}
UpperCAmelCase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Optional[int] = ViltForImagesAndTextClassification(_lowercase )
elif "irtr" in checkpoint_url:
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Tuple = ViltForImageAndTextRetrieval(_lowercase )
elif "mlm_itm" in checkpoint_url:
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Any = ViltForMaskedLM(_lowercase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowercase , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase_ : int = create_rename_keys(_lowercase , _lowercase , _lowercase , _lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
read_in_q_k_v(_lowercase , _lowercase )
if mlm_model or irtr_model:
UpperCAmelCase_ : Any = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
UpperCAmelCase_, UpperCAmelCase_ : int = model.load_state_dict(_lowercase , strict=_lowercase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowercase )
# Define processor
UpperCAmelCase_ : Dict = ViltImageProcessor(size=384 )
UpperCAmelCase_ : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCAmelCase_ : Optional[int] = ViltProcessor(_lowercase , _lowercase )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCAmelCase_ : int = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=_lowercase ).raw )
UpperCAmelCase_ : Any = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=_lowercase ).raw )
UpperCAmelCase_ : Any = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
UpperCAmelCase_ : Tuple = processor(_lowercase , _lowercase , return_tensors='''pt''' )
UpperCAmelCase_ : Optional[int] = processor(_lowercase , _lowercase , return_tensors='''pt''' )
UpperCAmelCase_ : Tuple = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
UpperCAmelCase_ : str = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=_lowercase ).raw )
if mlm_model:
UpperCAmelCase_ : List[str] = '''a bunch of [MASK] laying on a [MASK].'''
else:
UpperCAmelCase_ : Optional[Any] = '''How many cats are there?'''
UpperCAmelCase_ : str = processor(_lowercase , _lowercase , return_tensors='''pt''' )
UpperCAmelCase_ : Any = model(**_lowercase )
# Verify outputs
if mlm_model:
UpperCAmelCase_ : Optional[int] = torch.Size([1, 11, 30522] )
UpperCAmelCase_ : str = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowercase , atol=1E-4 )
# verify masked token prediction equals "cats"
UpperCAmelCase_ : str = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCAmelCase_ : List[str] = torch.Size([1, 3129] )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 )
# verify vqa prediction equals "2"
UpperCAmelCase_ : List[str] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCAmelCase_ : Union[str, Any] = torch.Size([1, 2] )
UpperCAmelCase_ : List[str] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 30 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_a )
class __a:
"""simple docstring"""
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
elif titles is None or texts is None:
UpperCAmelCase_ : List[str] = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles]
UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts]
UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
                f'''There should be as many titles as texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' )
UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
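        # Each passage is encoded as [CLS] question [SEP] title [SEP] text: the
        # question+title encoding (with special tokens) is concatenated with the
        # bare text encoding, then truncated to max_length when requested.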
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
UpperCAmelCase_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_ : Dict = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = reader_input['''input_ids''']
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3]
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ )
UpperCAmelCase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
UpperCAmelCase_ : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
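            # Keep only non-nested answers: skip candidates that contain, or are
            # contained by, a span that was already chosen.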
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask'''] | 30 | 1 |
from typing import Any
def lowerCamelCase__ ( input_list: list ) -> list[Any]:
    '''Return the mode(s) of input_list: the value(s) that occur most often, sorted.'''
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]  # occurrence count at each position
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
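# Example: lowerCamelCase__([2, 3, 3, 7]) returns [3]; with a tie such as
# [1, 1, 2, 2] it returns [1, 2].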
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 1 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, 'src', 'transformers')
__a = '\n{0} = None\n'
__a = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__a = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(_SCREAMING_SNAKE_CASE ,'''tokenizers''' )
UpperCAmelCase_ : Tuple = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(_SCREAMING_SNAKE_CASE ,'''tensorflow_text''' )
UpperCAmelCase_ : int = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(_SCREAMING_SNAKE_CASE ,'''sentencepiece_and_tokenizers''' )
UpperCAmelCase_ : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(_SCREAMING_SNAKE_CASE ,'''sentencepiece_and_tensorflow_text''' )
UpperCAmelCase_ : Dict = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(_SCREAMING_SNAKE_CASE ,'''sentencepiece_and_tokenizers_and_vision''' )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' ,_SCREAMING_SNAKE_CASE )
self.assertIn('''tensorflow_text''' ,_SCREAMING_SNAKE_CASE )
self.assertIn('''sentencepiece_and_tokenizers''' ,_SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' ,objects['''torch'''] )
self.assertIn('''TFBertModel''' ,objects['''tf'''] )
self.assertIn('''FlaxBertModel''' ,objects['''flax'''] )
self.assertIn('''BertModel''' ,objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' ,objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' ,objects['''sentencepiece_and_tokenizers'''] )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = create_dummy_object('''CONSTANT''' ,'''\'torch\'''' )
self.assertEqual(_SCREAMING_SNAKE_CASE ,'''\nCONSTANT = None\n''' )
UpperCAmelCase_ : str = create_dummy_object('''function''' ,'''\'torch\'''' )
self.assertEqual(
_SCREAMING_SNAKE_CASE ,'''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
UpperCAmelCase_ : Union[str, Any] = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCAmelCase_ : Tuple = create_dummy_object('''FakeClass''' ,'''\'torch\'''' )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCAmelCase_ : str = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] ,_SCREAMING_SNAKE_CASE ) | 30 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''wav2vec2'''
def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = feat_extract_norm
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = conv_bias
UpperCAmelCase_ : str = num_conv_pos_embeddings
UpperCAmelCase_ : Any = num_conv_pos_embedding_groups
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : str = hidden_dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = activation_dropout
UpperCAmelCase_ : List[str] = feat_proj_dropout
UpperCAmelCase_ : int = final_dropout
UpperCAmelCase_ : Union[str, Any] = layerdrop
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Optional[int] = do_stable_layer_norm
UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase_ : Any = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : List[str] = feat_quantizer_dropout
UpperCAmelCase_ : Dict = num_negatives
UpperCAmelCase_ : List[str] = codevector_dim
UpperCAmelCase_ : List[str] = proj_codevector_dim
UpperCAmelCase_ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : List[Any] = num_adapter_layers
UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Optional[int] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = xvector_output_dim
@property
def a__ ( self ) -> Any:
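        # Total downsampling factor of the feature extractor: the product of all conv strides.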
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 30 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__a = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class __a( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''pt''' )
UpperCAmelCase_ : Optional[int] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
UpperCAmelCase_ : Tuple = text_classifier('''This is great !''' ,top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}] )
UpperCAmelCase_ : Union[str, Any] = text_classifier(['''This is great !''', '''This is bad'''] ,top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
] ,)
UpperCAmelCase_ : int = text_classifier('''This is great !''' ,top_k=1 )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
# Legacy behavior
UpperCAmelCase_ : int = text_classifier('''This is great !''' ,return_all_scores=_SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
UpperCAmelCase_ : Optional[int] = text_classifier('''This is great !''' ,return_all_scores=_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}]] )
UpperCAmelCase_ : Any = text_classifier(['''This is great !''', '''Something else'''] ,return_all_scores=_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
] ,)
UpperCAmelCase_ : Tuple = text_classifier(['''This is great !''', '''Something else'''] ,return_all_scores=_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[
{'''label''': '''LABEL_0''', '''score''': 0.5_04},
{'''label''': '''LABEL_0''', '''score''': 0.5_04},
] ,)
@require_torch
def a__ ( self ) -> str:
import torch
UpperCAmelCase_ : int = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''pt''' ,device=torch.device('''cpu''' ) ,)
UpperCAmelCase_ : Optional[int] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
@require_tf
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = pipeline(
task='''text-classification''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,framework='''tf''' )
UpperCAmelCase_ : Tuple = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
@slow
@require_torch
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = pipeline('''text-classification''' )
UpperCAmelCase_ : Optional[Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCAmelCase_ : Tuple = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCAmelCase_ : Tuple = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''POSITIVE''', '''score''': 0.9_88}] )
@slow
@require_tf
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = pipeline('''text-classification''' ,framework='''tf''' )
UpperCAmelCase_ : List[Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCAmelCase_ : Any = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCAmelCase_ : Optional[Any] = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': '''POSITIVE''', '''score''': 0.9_88}] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Optional[int] = TextClassificationPipeline(model=_SCREAMING_SNAKE_CASE ,tokenizer=_SCREAMING_SNAKE_CASE )
return text_classifier, ["HuggingFace is in", "This is another test"]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCAmelCase_ : Tuple = '''HuggingFace is in'''
UpperCAmelCase_ : str = text_classifier(_SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': ANY(_SCREAMING_SNAKE_CASE ), '''score''': ANY(_SCREAMING_SNAKE_CASE )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
UpperCAmelCase_ : int = ['''HuggingFace is in ''', '''Paris is in France''']
UpperCAmelCase_ : Optional[Any] = text_classifier(_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': ANY(_SCREAMING_SNAKE_CASE ), '''score''': ANY(_SCREAMING_SNAKE_CASE )}, {'''label''': ANY(_SCREAMING_SNAKE_CASE ), '''score''': ANY(_SCREAMING_SNAKE_CASE )}] ,)
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCAmelCase_ : Optional[Any] = text_classifier(_SCREAMING_SNAKE_CASE ,top_k=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[[{'''label''': ANY(_SCREAMING_SNAKE_CASE ), '''score''': ANY(_SCREAMING_SNAKE_CASE )}] * N, [{'''label''': ANY(_SCREAMING_SNAKE_CASE ), '''score''': ANY(_SCREAMING_SNAKE_CASE )}] * N] ,)
UpperCAmelCase_ : List[str] = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
UpperCAmelCase_ : Any = text_classifier(_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,{'''label''': ANY(_SCREAMING_SNAKE_CASE ), '''score''': ANY(_SCREAMING_SNAKE_CASE )} ,)
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used as a text pair, but the tokenizer + pipeline interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was producing wrong outputs.
UpperCAmelCase_ : Union[str, Any] = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
text_classifier(_SCREAMING_SNAKE_CASE )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCAmelCase_ : Optional[int] = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,[{'''label''': ANY(_SCREAMING_SNAKE_CASE ), '''score''': ANY(_SCREAMING_SNAKE_CASE )}] ,)
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) | 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''vivit'''
def __init__( self ,_SCREAMING_SNAKE_CASE=224 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=[2, 16, 16] ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu_fast" ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-06 ,_SCREAMING_SNAKE_CASE=True ,**_SCREAMING_SNAKE_CASE ,) -> int:
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : List[str] = image_size
UpperCAmelCase_ : str = num_frames
UpperCAmelCase_ : str = tubelet_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = qkv_bias
super().__init__(**_SCREAMING_SNAKE_CASE ) | 30 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict:
UpperCAmelCase_ : List[Any] = tokenizer
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
UpperCAmelCase_ : Optional[int] = n_copies
def __iter__( self ) -> Any:
UpperCAmelCase_ : List[Any] = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
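# Each yielded item pairs one tokenized prompt with its task index, so a task
# with n_copies > 1 is emitted that many times for repeated sampling.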
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : str = start_length
UpperCAmelCase_ : Optional[int] = eof_strings
UpperCAmelCase_ : str = tokenizer
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase )
# the last element after the split is the text that follows the final stop token (usually "")
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1]
UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase )
UpperCAmelCase_ : Dict = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase )
UpperCAmelCase_ : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in the code_eval metric
os.environ['''HF_ALLOW_CODE_EVAL'''] = args.HF_ALLOW_CODE_EVAL
# make sure the tokenizer plays nicely with multiprocessing
os.environ['''TOKENIZERS_PARALLELISM'''] = '''false'''
if args.num_workers is None:
UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCAmelCase_ : int = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ : Any = tokenizer.eos_token
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase_ : str = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' )
UpperCAmelCase_ : Dict = load_metric('''code_eval''' )
UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
UpperCAmelCase_ : str = args.n_samples // args.batch_size
UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase )
# note: args.batch_size is actually num_return_sequences here, not the dataloader batch size
UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase )
UpperCAmelCase_ : int = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
UpperCAmelCase_ : Any = []
for task in tqdm(range(_lowercase ) ):
UpperCAmelCase_ : int = human_eval['''test'''][task]['''test''']
UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(_lowercase , _lowercase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 1 |
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = input('''Enter message: ''' )
UpperCAmelCase_ : Optional[int] = input('''Enter key [alphanumeric]: ''' )
UpperCAmelCase_ : Tuple = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
UpperCAmelCase_ : Tuple = '''encrypt'''
UpperCAmelCase_ : List[Any] = encrypt_message(_lowercase , _lowercase )
elif mode.lower().startswith('''d''' ):
UpperCAmelCase_ : List[Any] = '''decrypt'''
UpperCAmelCase_ : str = decrypt_message(_lowercase , _lowercase )
print(f'''\n{mode.title()}ed message:''' )
print(_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return translate_message(_lowercase , _lowercase , '''encrypt''' )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return translate_message(_lowercase , _lowercase , '''decrypt''' )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Optional[Any] = key.upper()
for symbol in message:
UpperCAmelCase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowercase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowercase ):
UpperCAmelCase_ : Any = 0
else:
translated.append(_lowercase )
return "".join(_lowercase )
if __name__ == "__main__":
main() | 30 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''imagegpt'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : Union[str, Any] = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[Any] = n_head
UpperCAmelCase_ : Union[str, Any] = n_inner
UpperCAmelCase_ : List[Any] = activation_function
UpperCAmelCase_ : List[str] = resid_pdrop
UpperCAmelCase_ : str = embd_pdrop
UpperCAmelCase_ : Optional[Any] = attn_pdrop
UpperCAmelCase_ : Dict = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Dict = scale_attn_weights
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ : Tuple = reorder_and_upcast_attn
UpperCAmelCase_ : int = tie_word_embeddings
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
class __a( _a ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs | 30 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = filter(lambda _lowercase : p.requires_grad , model.parameters() )
UpperCAmelCase_ : Dict = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__a = logging.getLogger(__name__)
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
if metric == "rouge2":
UpperCAmelCase_ : Any = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
UpperCAmelCase_ : Union[str, Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
UpperCAmelCase_ : Dict = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
UpperCAmelCase_ : Optional[int] = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
''' function.''' )
UpperCAmelCase_ : Union[str, Any] = ModelCheckpoint(
dirpath=_lowercase , filename=_lowercase , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=_lowercase , verbose=_lowercase , )
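# Illustrative call (not part of the original module): get_early_stopping_callback('loss', 3)
# returns a callback that monitors 'val_loss' in 'min' mode with a patience of 3 checks.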
class __a( pl.Callback ):
"""simple docstring"""
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Any = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> None:
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCAmelCase_ : str = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
UpperCAmelCase_ : Tuple = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : str = od / '''test_results.txt'''
UpperCAmelCase_ : Dict = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : Optional[Any] = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCAmelCase_ : List[str] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ,'''a+''' ) as writer:
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : str = metrics[key]
if isinstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ):
UpperCAmelCase_ : int = val.item()
UpperCAmelCase_ : List[Any] = f'''{key}: {val:.6f}\n'''
writer.write(_SCREAMING_SNAKE_CASE )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : int = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
try:
UpperCAmelCase_ : int = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Optional[Any] = pl_module.model.num_parameters()
UpperCAmelCase_ : Dict = count_trainable_parameters(_SCREAMING_SNAKE_CASE )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,'''test''' )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]:
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 30 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__a = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__a = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
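# Illustrative sketch (assuming the layer number is parsed from a Megatron shard
# name such as the hypothetical 'layer_05-model_00-model_states.pt'): the regex
# extracts 5, the 3 non-transformer prefix layers are subtracted, and
# 'input_layernorm.weight' maps to 'h.2.input_layernorm.weight'.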
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
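# Illustrative values derived from the regex above: torch.float16 -> 2 bytes,
# torch.int32 -> 4, torch.float64 -> 8; torch.bool is special-cased to 1/8.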
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename the keys to the transformers naming scheme
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for _, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename the keys to the transformers naming scheme
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = (3, 32, 128)
UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : Any = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCAmelCase_ : Any = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
UpperCAmelCase_ : List[str] = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,_SCREAMING_SNAKE_CASE )
with open(self.image_processor_file ,'''w''' ,encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Any:
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : List[str] = np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE ,0 ,-1 ) )
return image_input
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : Tuple = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
UpperCAmelCase_ : List[Any] = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
UpperCAmelCase_ : int = MgpstrProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : Dict = processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = '''test'''
UpperCAmelCase_ : Optional[Any] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = self.get_image_processor()
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = '''test'''
UpperCAmelCase_ : List[Any] = self.prepare_image_inputs()
UpperCAmelCase_ : List[str] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : List[str] = processor.char_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [seq.replace(''' ''' ,'''''' ) for seq in decoded_tok]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = torch.randn(1 ,27 ,38 )
UpperCAmelCase_ : Any = torch.randn(1 ,27 ,50_257 )
UpperCAmelCase_ : List[Any] = torch.randn(1 ,27 ,30_522 )
UpperCAmelCase_ : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) ,['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] ) | 30 |
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
for i in range(1 , 1001 ):
total += i**i
return str(_lowercase )[-10:]
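# An equivalent, more memory-friendly sketch keeps only the last ten digits via
# modular exponentiation instead of building the full ~3000-digit sum:
# total = sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
# return str(total).zfill(10) # zero-pad in case the truncated sum starts with 0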
if __name__ == "__main__":
print(solution()) | 30 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__a = logging.get_logger('transformers.models.encodec')
__a = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
__a = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
__a = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
__a = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
__a = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
__a = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__a = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__a = []
__a = []
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
for attribute in key.split('''.''' ):
UpperCAmelCase_ : Optional[Any] = getattr(_lowercase , _lowercase )
if weight_type is not None:
UpperCAmelCase_ : List[Any] = getattr(_lowercase , _lowercase ).shape
else:
UpperCAmelCase_ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase_ : Tuple = value
elif weight_type == "weight_g":
UpperCAmelCase_ : Any = value
elif weight_type == "weight_v":
UpperCAmelCase_ : List[Any] = value
elif weight_type == "bias":
UpperCAmelCase_ : str = value
elif weight_type == "running_mean":
UpperCAmelCase_ : Union[str, Any] = value
elif weight_type == "running_var":
UpperCAmelCase_ : str = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase_ : List[Any] = value
elif weight_type == "weight_ih_l0":
UpperCAmelCase_ : str = value
elif weight_type == "weight_hh_l0":
UpperCAmelCase_ : str = value
elif weight_type == "bias_ih_l0":
UpperCAmelCase_ : Optional[Any] = value
elif weight_type == "bias_hh_l0":
UpperCAmelCase_ : int = value
elif weight_type == "weight_ih_l1":
UpperCAmelCase_ : List[Any] = value
elif weight_type == "weight_hh_l1":
UpperCAmelCase_ : int = value
elif weight_type == "bias_ih_l1":
UpperCAmelCase_ : int = value
elif weight_type == "bias_hh_l1":
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : int = value
logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase_, UpperCAmelCase_ : Dict = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
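# Illustrative check: should_ignore('encoder.model.0.conv.conv.weight', ['encoder.model.0.*'])
# is True via the prefix branch, while a key without wildcards acts as a plain substring test.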
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = []
if model_name == "encodec_24khz" or "encodec_32khz":
UpperCAmelCase_ : Tuple = MAPPING_24K
elif model_name == "encodec_48khz":
UpperCAmelCase_ : str = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(_lowercase , _lowercase ):
logger.info(f'''{name} was ignored''' )
continue
UpperCAmelCase_ : int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = key.split('''.*.''' )
if prefix in name and suffix in name:
UpperCAmelCase_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
UpperCAmelCase_ : int = True
if "*" in mapped_key:
UpperCAmelCase_ : Optional[int] = name.split(_lowercase )[0].split('''.''' )[-2]
UpperCAmelCase_ : Optional[int] = mapped_key.replace('''*''' , _lowercase )
if "weight_g" in name:
UpperCAmelCase_ : List[Any] = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase_ : Tuple = '''weight_v'''
elif "weight_ih_l0" in name:
UpperCAmelCase_ : Optional[int] = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
UpperCAmelCase_ : Optional[int] = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
UpperCAmelCase_ : Dict = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
UpperCAmelCase_ : Any = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
UpperCAmelCase_ : int = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
UpperCAmelCase_ : int = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
UpperCAmelCase_ : Dict = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
UpperCAmelCase_ : Union[str, Any] = '''bias_hh_l1'''
elif "bias" in name:
UpperCAmelCase_ : List[str] = '''bias'''
elif "weight" in name:
UpperCAmelCase_ : int = '''weight'''
elif "running_mean" in name:
UpperCAmelCase_ : Tuple = '''running_mean'''
elif "running_var" in name:
UpperCAmelCase_ : Dict = '''running_var'''
elif "num_batches_tracked" in name:
UpperCAmelCase_ : Tuple = '''num_batches_tracked'''
else:
UpperCAmelCase_ : Dict = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ : int = EncodecConfig.from_pretrained(_lowercase )
else:
UpperCAmelCase_ : List[str] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
UpperCAmelCase_ : Union[str, Any] = [8, 5, 4, 4]
UpperCAmelCase_ : str = [2.2]
UpperCAmelCase_ : Optional[Any] = 64
UpperCAmelCase_ : str = 32000
UpperCAmelCase_ : Tuple = 2048
UpperCAmelCase_ : str = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : int = False
elif model_name == "encodec_48khz":
UpperCAmelCase_ : List[str] = [8, 5, 4, 2]
UpperCAmelCase_ : Optional[int] = [3.0, 6.0, 12.0, 24.0]
UpperCAmelCase_ : Optional[int] = 48000
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Any = '''time_group_norm'''
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Union[str, Any] = 1.0
UpperCAmelCase_ : List[Any] = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
UpperCAmelCase_ : int = EncodecModel(_lowercase )
UpperCAmelCase_ : List[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_lowercase )
UpperCAmelCase_ : int = torch.load(_lowercase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
UpperCAmelCase_ : Optional[int] = original_checkpoint['''best_state''']
recursively_load_weights(_lowercase , _lowercase , _lowercase )
model.save_pretrained(_lowercase )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(_lowercase )
model.push_to_hub(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__a = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 30 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__a = None
__a = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126, minus "|i1", whose values are not preserved correctly when saving and loading an image
__a = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def lowerCamelCase__ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : int = image.format
else:
UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_lowercase , format=_lowercase )
return buffer.getvalue()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if hasattr(_lowercase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ : Tuple = array.dtype
UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ : Dict = dtype.kind
UpperCAmelCase_ : Union[str, Any] = dtype.itemsize
UpperCAmelCase_ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : Tuple = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase )
UpperCAmelCase_ : str = np.dtype(_lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) )
return {"path": None, "bytes": image_to_bytes(_lowercase )}
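# A hedged standalone demo of the same-kind downcast rule applied above: halve
# the itemsize until a dtype from an allowed set is reached. The allowed set
# below is illustrative, not the library's actual list.
import numpy as np


def downcast_same_kind(dt, allowed):
    itemsize = dt.itemsize
    while itemsize >= 1:
        candidate = np.dtype(dt.kind + str(itemsize))
        if candidate in allowed:
            return candidate
        itemsize //= 2
    return None


assert downcast_same_kind(np.dtype("int64"), [np.dtype("int32")]) == np.dtype("int32")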
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase )
if isinstance(_lowercase , _lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowercase , np.ndarray ):
UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
elif isinstance(_lowercase , PIL.Image.Image ):
UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
else:
return objs
else:
return objs | 30 | 1 |
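# A minimal hedged sketch (illustrative names, not the library's API) of the
# {"bytes", "path"} encoding convention used by the Image feature above: keep
# a path when the image came from a file, otherwise inline the encoded bytes.
# Assumes Pillow is installed.
from io import BytesIO

import PIL.Image


def encode_pil_image_sketch(image):
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    buffer = BytesIO()
    image.save(buffer, format=image.format or "PNG")
    return {"path": None, "bytes": buffer.getvalue()}


print(encode_pil_image_sketch(PIL.Image.new("RGB", (4, 4)))["path"])  # None -> bytes branch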
from __future__ import annotations
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = len(_lowercase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCAmelCase_ : Tuple = i + 1
else:
UpperCAmelCase_ : Union[str, Any] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""") | 30 |
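# A hedged usage note on the two-pointer search above: it assumes `nums` is
# sorted in ascending order; on unsorted input it can miss valid pairs.
# Equivalent sketch with readable names (illustrative only):
def two_pointer_sketch(nums, target):
    i, j = 0, len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1
        else:
            j -= 1
    return []


assert two_pointer_sketch([2, 7, 11, 15], 9) == [0, 1]
assert two_pointer_sketch([1, 2, 3], 100) == []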
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 | 1 |
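# A hedged toy sketch of the processor -> model -> logits recipe the
# integration test above exercises. The model here is a randomly initialized
# linear head, purely to show the shape flow; it is not the DiT checkpoint
# used above.
import torch

toy_model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 224 * 224, 16))
toy_pixel_values = torch.rand(1, 3, 224, 224)  # stands in for processor output
with torch.no_grad():
    toy_logits = toy_model(toy_pixel_values)
assert toy_logits.shape == torch.Size((1, 16))  # 16 RVL-CDIP classes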
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__a = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__a = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__a = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) ,homepage='''https://github.com/hendrycks/math''' ,codebase_urls=['''https://github.com/hendrycks/math'''] ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : List[Any] = 0.0
for i, j in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
n_correct += 1.0 if math_equivalence.is_equiv(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else 0.0
UpperCAmelCase_ : str = n_correct / len(_SCREAMING_SNAKE_CASE )
return {
"accuracy": accuracy,
} | 30 |
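# The metric above reduces to plain accuracy over an equivalence predicate.
# A hedged sketch with a toy canonicalizer standing in for
# math_equivalence.is_equiv (which the original imports from hendrycks/math):
def is_equiv_sketch(a, b):
    return a.replace(" ", "") == b.replace(" ", "")  # toy canonicalization


def accuracy_sketch(predictions, references):
    correct = sum(1.0 for p, r in zip(predictions, references) if is_equiv_sketch(p, r))
    return correct / len(references)


assert accuracy_sketch(["1 / 2"], ["1/2"]) == 1.0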
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 | 1 |
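# The class above follows a common deprecation-shim pattern: subclass the
# replacement, emit a warning, delegate everything. A generic hedged sketch of
# the same pattern (names are illustrative, not the transformers API):
import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


assert OldFeatureExtractor().size == 224  # behaves exactly like NewProcessor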
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=14 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=99 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0.02 ,) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Optional[Any] = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Optional[int] = rotary_dim
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = max_position_embeddings
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Any = vocab_size - 1
UpperCAmelCase_ : Optional[int] = vocab_size - 1
UpperCAmelCase_ : Optional[int] = vocab_size - 1
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Any = None
if self.use_input_mask:
UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[Any] = GPTJConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,use_cache=_SCREAMING_SNAKE_CASE ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,rotary_dim=self.rotary_dim ,)
return (config, input_ids, input_mask)
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : Any = 20
UpperCAmelCase_ : List[Any] = model_class_name(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model.init_cache(input_ids.shape[0] ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = jnp.ones((input_ids.shape[0], max_decoder_length) ,dtype='''i4''' )
UpperCAmelCase_ : List[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCAmelCase_ : Optional[Any] = model(
input_ids[:, :-1] ,attention_mask=_SCREAMING_SNAKE_CASE ,past_key_values=_SCREAMING_SNAKE_CASE ,position_ids=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Dict = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype='''i4''' )
UpperCAmelCase_ : Dict = model(
input_ids[:, -1:] ,attention_mask=_SCREAMING_SNAKE_CASE ,past_key_values=outputs_cache.past_key_values ,position_ids=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f'''Max diff is {diff}''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Dict = 20
UpperCAmelCase_ : Tuple = model_class_name(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] ,axis=-1 ,)
UpperCAmelCase_ : List[str] = model.init_cache(input_ids.shape[0] ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] ,(input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCAmelCase_ : str = model(
input_ids[:, :-1] ,attention_mask=_SCREAMING_SNAKE_CASE ,past_key_values=_SCREAMING_SNAKE_CASE ,position_ids=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] ,dtype='''i4''' )
UpperCAmelCase_ : Tuple = model(
input_ids[:, -1:] ,past_key_values=outputs_cache.past_key_values ,attention_mask=_SCREAMING_SNAKE_CASE ,position_ids=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f'''Max diff is {diff}''' )
@require_flax
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCAmelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = FlaxGPTJModelTester(self )
def a__ ( self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@tooslow
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = GPTaTokenizer.from_pretrained('''gpt2''' ,pad_token='''<|endoftext|>''' ,padding_side='''left''' )
UpperCAmelCase_ : Any = tokenizer(['''Hello this is a long string''', '''Hey'''] ,return_tensors='''np''' ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Union[str, Any] = model.config.eos_token_id
UpperCAmelCase_ : Optional[int] = jax.jit(model.generate )
UpperCAmelCase_ : Any = jit_generate(
inputs['''input_ids'''] ,attention_mask=inputs['''attention_mask'''] ,pad_token_id=tokenizer.pad_token_id ).sequences
UpperCAmelCase_ : int = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE ,skip_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_, UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase_ : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase_ : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase_ : Tuple = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_ : List[str] = pt_inputs['''input_ids'''].shape
UpperCAmelCase_ : Dict = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ : str = model_class(_SCREAMING_SNAKE_CASE ,dtype=jnp.floataa )
UpperCAmelCase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = fx_state
with torch.no_grad():
UpperCAmelCase_ : Tuple = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
UpperCAmelCase_ : int = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = model_class.from_pretrained(_SCREAMING_SNAKE_CASE ,from_pt=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
@is_pt_flax_cross_test
def a__ ( self ) -> str:
UpperCAmelCase_, UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase_ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase_ : Dict = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ : Any = model_class(_SCREAMING_SNAKE_CASE ,dtype=jnp.floataa )
UpperCAmelCase_ : Optional[Any] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE ,fx_model.params )
UpperCAmelCase_, UpperCAmelCase_ : int = pt_inputs['''input_ids'''].shape
UpperCAmelCase_ : int = np.random.randint(0 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
UpperCAmelCase_ : Any = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE ,from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCAmelCase_ : int = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) ,'''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] ,pt_output[:, -1].numpy() ,4e-2 )
@tooslow
def a__ ( self ) -> str:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase_ : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 30 |
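# The two cache tests above assert that stepwise decoding with carried state
# matches one full forward pass, compared via a max-abs-diff tolerance. A
# hedged toy analogue of that check, with a cumulative sum standing in for
# the transformer:
import numpy as np

xs = np.arange(6.0)
full_pass = np.cumsum(xs)  # "one-shot forward pass"
carried, stepwise = 0.0, []
for x in xs:  # "incremental decoding with a cache"
    carried += x
    stepwise.append(carried)
diff = np.max(np.abs(full_pass - np.array(stepwise)))
assert diff < 1e-3, f"Max diff is {diff}"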
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : List[str] = crop_pct
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Any = do_normalize
UpperCAmelCase_ : str = image_mean
UpperCAmelCase_ : List[Any] = image_std
def a__ ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 30 | 1 |
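# A hedged sketch of the resize-then-center-crop transform the processor
# above is configured with (shortest_edge=30, crop_pct=0.9). The exact
# resizing rule below is an assumption for illustration, not the library's
# implementation.
import PIL.Image


def resize_and_center_crop_sketch(image, shortest_edge=30, crop_pct=0.9):
    scale_size = int(shortest_edge / crop_pct)  # resize target before cropping
    w, h = image.size
    short, long = (w, h) if w <= h else (h, w)
    new_short, new_long = scale_size, int(long * scale_size / short)
    image = image.resize((new_short, new_long) if w <= h else (new_long, new_short))
    left = (image.width - shortest_edge) // 2
    top = (image.height - shortest_edge) // 2
    return image.crop((left, top, left + shortest_edge, top + shortest_edge))


assert resize_and_center_crop_sketch(PIL.Image.new("RGB", (40, 60))).size == (30, 30)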
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''altclip_text_model'''
def __init__( self ,_SCREAMING_SNAKE_CASE=250_002 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=514 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-05 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE="absolute" ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=768 ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Any = type_vocab_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Union[str, Any] = initializer_factor
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = position_embedding_type
UpperCAmelCase_ : List[Any] = use_cache
UpperCAmelCase_ : str = project_dim
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''altclip_vision_model'''
def __init__( self ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=224 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1.0 ,**_SCREAMING_SNAKE_CASE ,) -> int:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : str = projection_dim
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Union[str, Any] = initializer_factor
UpperCAmelCase_ : Optional[Any] = attention_dropout
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : List[Any] = hidden_act
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = cls.get_config_dict(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
UpperCAmelCase_ : List[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''altclip'''
lowerCAmelCase = True
def __init__( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=2.65_92 ,**_SCREAMING_SNAKE_CASE ) -> Dict:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
UpperCAmelCase_ : Optional[int] = kwargs.pop('''text_config_dict''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = kwargs.pop('''vision_config_dict''' ,_SCREAMING_SNAKE_CASE )
super().__init__(**_SCREAMING_SNAKE_CASE )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
UpperCAmelCase_ : Optional[Any] = {}
# This is the complete result when using `text_config_dict`.
UpperCAmelCase_ : Optional[Any] = AltCLIPTextConfig(**_SCREAMING_SNAKE_CASE ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
UpperCAmelCase_ : Any = (
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
UpperCAmelCase_ : Optional[int] = (
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(_SCREAMING_SNAKE_CASE )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
UpperCAmelCase_ : Dict = {}
# This is the complete result when using `vision_config_dict`.
UpperCAmelCase_ : Tuple = AltCLIPVisionConfig(**_SCREAMING_SNAKE_CASE ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
UpperCAmelCase_ : str = {
str(_SCREAMING_SNAKE_CASE ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
UpperCAmelCase_ : Optional[int] = (
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
UpperCAmelCase_ : str = (
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config["{key}"]` will be overriden.'''
)
logger.warning(_SCREAMING_SNAKE_CASE )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
UpperCAmelCase_ : Union[str, Any] = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
UpperCAmelCase_ : List[Any] = {}
logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
UpperCAmelCase_ : List[Any] = AltCLIPTextConfig(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AltCLIPVisionConfig(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = projection_dim
UpperCAmelCase_ : List[str] = logit_scale_init_value
UpperCAmelCase_ : List[str] = 1.0
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.text_config.to_dict()
UpperCAmelCase_ : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase_ : str = self.__class__.model_type
return output | 30 |
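# The composite-config pattern above (a parent config holding text and vision
# sub-configs plus a from_*_configs constructor) can be sketched generically
# as below. This is an illustration, not the transformers API.
import copy


class SubConfigSketch:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class CompositeConfigSketch:
    def __init__(self, text_config=None, vision_config=None, projection_dim=768):
        self.text_config = SubConfigSketch(**(text_config or {}))
        self.vision_config = SubConfigSketch(**(vision_config or {}))
        self.projection_dim = projection_dim

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config.to_dict(), vision_config.to_dict(), **kwargs)


composite = CompositeConfigSketch.from_text_vision_configs(
    SubConfigSketch(hidden_size=1_024), SubConfigSketch(patch_size=32)
)
assert composite.vision_config.patch_size == 32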
import unittest
import numpy as np
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = np.shape(_lowercase )
UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase )
UpperCAmelCase_ : Tuple = np.shape(_lowercase )
if shape_a[0] != shape_b[0]:
UpperCAmelCase_ : Tuple = (
'''Expected the same number of rows for A and B. '''
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(_lowercase )
if shape_b[1] != shape_c[1]:
UpperCAmelCase_ : List[Any] = (
'''Expected the same number of columns for B and C. '''
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(_lowercase )
UpperCAmelCase_ : Dict = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase_ : Any = np.linalg.inv(_lowercase )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] )
UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s )
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> None:
UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 1 |
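# A hedged numeric check of the identity the tests above rely on: for the
# block matrix M = [[A, B], [B^T, C]] with A invertible,
# det(M) = det(A) * det(C - B^T A^{-1} B).
import numpy as np

a = np.array([[1.0, 2, 1], [2, 1, 2], [3, 2, 4]])
b = np.array([[0.0, 3], [3, 0], [2, 3]])
c = np.array([[2.0, 1], [6, 3]])
s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of A in M
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))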
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__a = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
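# The module above uses the lazy-import pattern: declare the public structure
# up front and import heavy submodules only on first attribute access. A
# minimal hedged sketch with importlib (illustrative, not the transformers
# _LazyModule implementation):
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)


lazy_json = LazyModuleSketch("lazy_json", {"json": ["dumps"]})
print(lazy_json.dumps({"ok": True}))  # json is only imported on this access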
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
UpperCAmelCase_ : Any = ''''''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
UpperCAmelCase_ : Any = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCAmelCase_ : Union[str, Any] = B'''=''' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
UpperCAmelCase_ : int = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowercase ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Tuple = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase , _lowercase ):
try:
UpperCAmelCase_ : Any = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
UpperCAmelCase_ : str = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase_ : List[Any] = encoded_data[:-padding]
UpperCAmelCase_ : List[Any] = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase_ : Tuple = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
UpperCAmelCase_ : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowercase ) , 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 1 |
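# A hedged check of the padding arithmetic used above: n input bytes give 8n
# bits, and one "=" is emitted per 2 missing bits when 8n is not a multiple
# of 6. This matches the classic byte-level rule, verified against the
# standard library:
import base64

for n in range(1, 10):
    bits = 8 * n
    bit_rule_pad = 0 if bits % 6 == 0 else (6 - bits % 6) // 2
    byte_rule_pad = (3 - n % 3) % 3
    assert bit_rule_pad == byte_rule_pad
    assert base64.b64encode(b"x" * n).count(b"=") == bit_rule_pad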
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ):
'''simple docstring'''
UpperCAmelCase_ : Dict = {'''add_prefix_space''': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(''' ''' ) else {}
UpperCAmelCase_ : List[str] = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='''max_length''' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None , ):
'''simple docstring'''
UpperCAmelCase_ : str = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="train" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="" ,) -> Tuple:
super().__init__()
UpperCAmelCase_ : Optional[Any] = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.source''' )
UpperCAmelCase_ : List[Any] = Path(_SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.target''' )
UpperCAmelCase_ : Tuple = self.get_char_lens(self.src_file )
UpperCAmelCase_ : Any = max_source_length
UpperCAmelCase_ : str = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
UpperCAmelCase_ : str = tokenizer
UpperCAmelCase_ : Union[str, Any] = prefix
if n_obs is not None:
UpperCAmelCase_ : Dict = self.src_lens[:n_obs]
UpperCAmelCase_ : Union[str, Any] = src_lang
UpperCAmelCase_ : Any = tgt_lang
def __len__( self ) -> Any:
return len(self.src_lens )
def __getitem__( self ,_SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]:
UpperCAmelCase_ : Any = index + 1 # linecache starts at 1
UpperCAmelCase_ : int = self.prefix + linecache.getline(str(self.src_file ) ,_SCREAMING_SNAKE_CASE ).rstrip('''\n''' )
UpperCAmelCase_ : int = linecache.getline(str(self.tgt_file ) ,_SCREAMING_SNAKE_CASE ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
UpperCAmelCase_ : Tuple = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer
)
UpperCAmelCase_ : Dict = self.tokenizer.generator if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE ) else self.tokenizer
UpperCAmelCase_ : Tuple = encode_line(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.max_source_length ,'''right''' )
UpperCAmelCase_ : Tuple = encode_line(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.max_target_length ,'''right''' )
UpperCAmelCase_ : Optional[Any] = source_inputs['''input_ids'''].squeeze()
UpperCAmelCase_ : Optional[Any] = target_inputs['''input_ids'''].squeeze()
UpperCAmelCase_ : Tuple = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def a__ ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return [len(_SCREAMING_SNAKE_CASE ) for x in Path(_SCREAMING_SNAKE_CASE ).open().readlines()]
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict[str, torch.Tensor]:
UpperCAmelCase_ : int = torch.stack([x['''input_ids'''] for x in batch] )
UpperCAmelCase_ : Dict = torch.stack([x['''attention_mask'''] for x in batch] )
UpperCAmelCase_ : Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch] )
UpperCAmelCase_ : int = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE )
else self.tokenizer.pad_token_id
)
UpperCAmelCase_ : Tuple = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_SCREAMING_SNAKE_CASE )
else self.tokenizer.pad_token_id
)
UpperCAmelCase_ : str = trim_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = trim_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
__a = getLogger(__name__)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_lowercase ) )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , '''git_log.json''' ) )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ):
'''simple docstring'''
with open(_lowercase , '''w''' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
with open(_lowercase ) as f:
return json.load(_lowercase )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = git.Repo(search_parent_directories=_lowercase )
UpperCAmelCase_ : Union[str, Any] = {
'''repo_id''': str(_lowercase ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return list(map(_lowercase , _lowercase ) )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
with open(_lowercase , '''wb''' ) as f:
return pickle.dump(_lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
def remove_articles(_lowercase ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
UpperCAmelCase_ : Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Any = normalize_answer(_lowercase ).split()
UpperCAmelCase_ : List[Any] = normalize_answer(_lowercase ).split()
UpperCAmelCase_ : Any = Counter(_lowercase ) & Counter(_lowercase )
UpperCAmelCase_ : Tuple = sum(common.values() )
if num_same == 0:
return 0
UpperCAmelCase_ : str = 1.0 * num_same / len(_lowercase )
UpperCAmelCase_ : Union[str, Any] = 1.0 * num_same / len(_lowercase )
UpperCAmelCase_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
assert len(_lowercase ) == len(_lowercase )
UpperCAmelCase_ : Optional[int] = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return model_prefix.startswith('''rag''' )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
UpperCAmelCase_ : Dict = '''dropout_rate'''
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
UpperCAmelCase_ : Union[str, Any] = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config | 30 |
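# A hedged, self-contained illustration of the token-level F1 computed above
# (normalize, count overlapping tokens, harmonic mean of precision and
# recall). This mirrors the SQuAD-style metric; the toy normalization below
# is an assumption, simpler than normalize_answer above.
from collections import Counter


def token_f1_sketch(prediction, gold):
    p, g = prediction.lower().split(), gold.lower().split()
    num_same = sum((Counter(p) & Counter(g)).values())
    if num_same == 0:
        return 0.0
    precision, recall = num_same / len(p), num_same / len(g)
    return 2 * precision * recall / (precision + recall)


assert round(token_f1_sketch("the cat sat", "cat sat down"), 2) == 0.67  # 2 shared tokens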
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=99 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,) -> Any:
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Optional[int] = type_sequence_label_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Any = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Optional[int] = scope
def a__ ( self ) -> Optional[int]:
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> Any:
return MPNetConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : int = MPNetModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ : List[str] = MPNetForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Optional[int] = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,start_positions=_SCREAMING_SNAKE_CASE ,end_positions=_SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Optional[Any] = MPNetForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ : Any = self.num_choices
UpperCAmelCase_ : List[str] = MPNetForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Any = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Tuple = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Dict = self.num_labels
UpperCAmelCase_ : int = MPNetForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
((UpperCAmelCase_), (UpperCAmelCase_), (UpperCAmelCase_), (UpperCAmelCase_), (UpperCAmelCase_), (UpperCAmelCase_)) : Any = config_and_inputs
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = True
def a__ ( self ) -> str:
UpperCAmelCase_ : str = MPNetModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_SCREAMING_SNAKE_CASE )
@require_torch
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
UpperCAmelCase_ : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : List[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 |
from functools import reduce
__a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = __a) -> int:
    '''Return the greatest product of thirteen adjacent digits in the 1000-digit series above.'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 1 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = parent
def a__ ( self ) -> List[str]:
return {}
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
UpperCAmelCase_ : List[str] = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = MarkupLMFeatureExtractor if is_bsa_available() else None
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = MarkupLMFeatureExtractionTester(self )
@property
def a__ ( self ) -> Optional[Any]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def a__ ( self ) -> int:
# Initialize feature_extractor
UpperCAmelCase_ : Dict = self.feature_extraction_class()
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = get_html_strings()[0]
UpperCAmelCase_ : Optional[Any] = feature_extractor(_SCREAMING_SNAKE_CASE )
# fmt: off
UpperCAmelCase_ : List[Any] = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
UpperCAmelCase_ : Any = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes ,_SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.xpaths ,_SCREAMING_SNAKE_CASE )
# Test batched
UpperCAmelCase_ : str = get_html_strings()
UpperCAmelCase_ : Tuple = feature_extractor(_SCREAMING_SNAKE_CASE )
# fmt: off
UpperCAmelCase_ : str = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
UpperCAmelCase_ : str = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) ,2 )
self.assertEqual(len(encoding.xpaths ) ,2 )
self.assertEqual(encoding.nodes ,_SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.xpaths ,_SCREAMING_SNAKE_CASE ) | 30 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    '''Return pi to ``precision`` significant digits via the Chudnovsky algorithm.'''
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''')
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''')
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = ['''pixel_values''']
def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 1 / 255 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = size if size is not None else {'''height''': 256, '''width''': 256}
UpperCAmelCase_ : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Dict = size
UpperCAmelCase_ : Tuple = resample
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[int] = crop_size
UpperCAmelCase_ : Union[str, Any] = do_rescale
UpperCAmelCase_ : Any = rescale_factor
UpperCAmelCase_ : List[Any] = do_normalize
UpperCAmelCase_ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PIL.Image.BICUBIC ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : int = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> List[str]:
return rescale(_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image:
UpperCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Any = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
UpperCAmelCase_ : Tuple = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
UpperCAmelCase_ : int = [self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
UpperCAmelCase_ : int = [self.center_crop(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCAmelCase_ : Optional[int] = [self.rescale(image=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
UpperCAmelCase_ : Dict = [self.normalize(image=_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase_ : List[Any] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase_ : str = {'''pixel_values''': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE ) | 30 |
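At its core the preprocess pipeline above is resize, center-crop, rescale, normalize, then channels-first. A minimal numpy sketch of the numeric steps, assuming the default IMAGENET_STANDARD mean/std of 0.5 (shapes and values are illustrative):
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # H x W x C
pixels = image.astype(np.float32) * (1 / 255)   # rescale
pixels = (pixels - 0.5) / 0.5                   # normalize
pixels = pixels.transpose(2, 0, 1)              # HWC -> CHW (ChannelDimension.FIRST)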
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    '''Project a 3D point onto 2D at the given viewing distance, then scale it.'''
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'''Input values must either be float or int: {list(locals().values())}'''
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    '''Rotate a 3D point about the x, y or z axis by ``angle`` degrees.'''
    if not isinstance(axis, str):
        raise TypeError('''Axis must be a str''')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values())}'''
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''')
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 1 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE="" ,_SCREAMING_SNAKE_CASE="train" ) -> Tuple:
assert os.path.isdir(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Union[str, Any] = os.listdir(_SCREAMING_SNAKE_CASE )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
UpperCAmelCase_ : int = os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if not os.path.isfile(_SCREAMING_SNAKE_CASE ):
continue
self.documents.append(_SCREAMING_SNAKE_CASE )
def __len__( self ) -> Tuple:
return len(self.documents )
def __getitem__( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : int = self.documents[idx]
UpperCAmelCase_ : str = document_path.split('''/''' )[-1]
with open(_SCREAMING_SNAKE_CASE ,encoding='''utf-8''' ) as source:
UpperCAmelCase_ : Any = source.read()
UpperCAmelCase_, UpperCAmelCase_ : Dict = process_story(_SCREAMING_SNAKE_CASE )
return document_name, story_lines, summary_lines
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = list(filter(lambda _lowercase : len(_lowercase ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
# for some unknown reason some lines miss a period, add it
UpperCAmelCase_ : Union[str, Any] = [_add_missing_period(_lowercase ) for line in nonempty_lines]
# gather article lines
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Any = deque(_lowercase )
while True:
try:
UpperCAmelCase_ : Union[str, Any] = lines.popleft()
if element.startswith('''@highlight''' ):
break
story_lines.append(_lowercase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
UpperCAmelCase_ : int = list(filter(lambda _lowercase : not t.startswith('''@highlight''' ) , _lowercase ) )
return story_lines, summary_lines
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u201d''', ''')''']
if line.startswith('''@highlight''' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    '''Truncate or right-pad ``sequence`` to exactly ``block_size`` tokens.'''
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    '''Return a mask with 1 for real tokens and 0 for padding positions.'''
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = [tokenizer.encode(_lowercase ) for line in story_lines]
UpperCAmelCase_ : Tuple = [token for sentence in story_lines_token_ids for token in sentence]
UpperCAmelCase_ : int = [tokenizer.encode(_lowercase ) for line in summary_lines]
UpperCAmelCase_ : List[str] = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = []
for sequence in batch:
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Optional[Any] = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_lowercase )
return torch.tensor(_lowercase ) | 30 |
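A quick illustration of the two padding helpers above; the token ids are made up and the expected outputs are shown in comments:
import torch

seq = fit_to_block_size([101, 7592, 102], block_size=6, pad_token_id=0)
mask = build_mask(torch.tensor(seq), pad_token_id=0)
# seq  -> [101, 7592, 102, 0, 0, 0]
# mask -> tensor([1, 1, 1, 0, 0, 0])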
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__a = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__a = concatenate_datasets
__a = DownloadConfig
__a = DownloadManager
__a = DownloadMode
__a = DownloadConfig
__a = DownloadMode
__a = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 1 |
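A minimal usage sketch of the public API this __init__ re-exports; the dataset name is just an example:
from datasets import load_dataset

ds = load_dataset("glue", "mrpc", split="train")
print(ds[0]["sentence1"])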
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase="attention" ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
UpperCAmelCase_ : Any = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
UpperCAmelCase_ : Optional[int] = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
UpperCAmelCase_ : Tuple = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
if split_mlp_wi:
UpperCAmelCase_ : Dict = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
UpperCAmelCase_ : Union[str, Any] = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
UpperCAmelCase_ : Union[str, Any] = (wi_a, wi_a)
else:
UpperCAmelCase_ : Tuple = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
UpperCAmelCase_ : List[str] = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def lowerCamelCase__ ( _lowercase , *, _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = traverse_util.flatten_dict(variables['''target'''] )
UpperCAmelCase_ : Tuple = {'''/'''.join(_lowercase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase_ : Any = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , _lowercase )
UpperCAmelCase_ : int = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase_ : Any = old['''token_embedder/embedding''']
# Encoder.
for i in range(_lowercase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase_ : int = tax_layer_norm_lookup(_lowercase , _lowercase , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = tax_attention_lookup(_lowercase , _lowercase , '''encoder''' , '''attention''' )
UpperCAmelCase_ : Tuple = layer_norm
UpperCAmelCase_ : str = k.T
UpperCAmelCase_ : int = o.T
UpperCAmelCase_ : Optional[Any] = q.T
UpperCAmelCase_ : str = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase_ : List[str] = tax_layer_norm_lookup(_lowercase , _lowercase , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = tax_mlp_lookup(_lowercase , _lowercase , '''encoder''' , _lowercase )
UpperCAmelCase_ : int = layer_norm
if split_mlp_wi:
UpperCAmelCase_ : str = wi[0].T
UpperCAmelCase_ : Optional[int] = wi[1].T
else:
UpperCAmelCase_ : List[str] = wi.T
UpperCAmelCase_ : Tuple = wo.T
UpperCAmelCase_ : int = old[
'''encoder/relpos_bias/rel_embedding'''
].T
UpperCAmelCase_ : int = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(_lowercase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase_ : Tuple = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = tax_attention_lookup(_lowercase , _lowercase , '''decoder''' , '''self_attention''' )
UpperCAmelCase_ : str = layer_norm
UpperCAmelCase_ : Dict = k.T
UpperCAmelCase_ : int = o.T
UpperCAmelCase_ : int = q.T
UpperCAmelCase_ : Union[str, Any] = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase_ : Optional[Any] = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : List[str] = tax_attention_lookup(_lowercase , _lowercase , '''decoder''' , '''encoder_decoder_attention''' )
UpperCAmelCase_ : int = layer_norm
UpperCAmelCase_ : Dict = k.T
UpperCAmelCase_ : Tuple = o.T
UpperCAmelCase_ : List[str] = q.T
UpperCAmelCase_ : Tuple = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase_ : List[Any] = tax_layer_norm_lookup(_lowercase , _lowercase , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase_, UpperCAmelCase_ : Tuple = tax_mlp_lookup(_lowercase , _lowercase , '''decoder''' , _lowercase )
UpperCAmelCase_ : str = layer_norm
if split_mlp_wi:
UpperCAmelCase_ : List[Any] = wi[0].T
UpperCAmelCase_ : List[Any] = wi[1].T
else:
UpperCAmelCase_ : Any = wi.T
UpperCAmelCase_ : Dict = wo.T
UpperCAmelCase_ : Any = old['''decoder/decoder_norm/scale''']
UpperCAmelCase_ : Tuple = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase_ : Union[str, Any] = old['''decoder/logits_dense/kernel'''].T
return new
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase_ : List[str] = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase_ : Any = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCAmelCase_ : str = state_dict['''shared.weight''']
return state_dict
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Any = checkpoints.load_tax_checkpoint(_lowercase )
UpperCAmelCase_ : List[Any] = convert_tax_to_pytorch(_lowercase , num_layers=config.num_layers , is_encoder_only=_lowercase )
UpperCAmelCase_ : Optional[Any] = make_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase , strict=_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = False ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TaConfig.from_json_file(_lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase_ : List[str] = TaEncoderModel(_lowercase )
else:
UpperCAmelCase_ : Optional[Any] = TaForConditionalGeneration(_lowercase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_lowercase , _lowercase , _lowercase , _lowercase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_lowercase )
# Verify that we can load the checkpoint.
model.from_pretrained(_lowercase )
print('''Done''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
__a = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 30 |
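Programmatic use of the converter, mirroring the argparse wiring above; all paths are placeholders and the arguments are passed positionally, exactly as the __main__ block does:
convert_tax_checkpoint_to_pytorch(
    "/path/to/t5x/checkpoint_1000000",  # T5X checkpoint directory
    "/path/to/t5_config.json",          # model config
    "/path/to/pytorch_dump",            # output directory for the PyTorch model
    False,                              # is_encoder_only
)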
def gcd(a: int, b: int) -> int:
    '''Greatest common divisor via the Euclidean algorithm.'''
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    '''Modular inverse of ``a`` mod ``m`` via the extended Euclidean algorithm.'''
    if gcd(a, m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 30 | 1 |
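A quick sanity check of the helper above: 7 * 103 = 721 = 6 * 120 + 1, so 103 is the inverse of 7 modulo 120.
assert find_mod_inverse(7, 120) == 103
assert find_mod_inverse(7, 120) * 7 % 120 == 1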
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = '▁'
__a = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__a = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__a = {'vinai/bartpho-syllable': 1_024}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="<unk>" ,_SCREAMING_SNAKE_CASE="<pad>" ,_SCREAMING_SNAKE_CASE="<mask>" ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ : List[Any] = AddedToken(_SCREAMING_SNAKE_CASE ,lstrip=_SCREAMING_SNAKE_CASE ,rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else mask_token
UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE ,eos_token=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,sep_token=_SCREAMING_SNAKE_CASE ,cls_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,sp_model_kwargs=self.sp_model_kwargs ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : Any = monolingual_vocab_file
UpperCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : Any = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase_ : str = cnt
cnt += 1
with open(_SCREAMING_SNAKE_CASE ,'''r''' ,encoding='''utf-8''' ) as f:
for line in f.readlines():
UpperCAmelCase_ : int = line.strip().split()[0]
UpperCAmelCase_ : Tuple = len(self.fairseq_tokens_to_ids )
if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase_ : Tuple = len(self.fairseq_tokens_to_ids )
UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> str:
UpperCAmelCase_ : Any = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ : List[str] = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
UpperCAmelCase_ : Optional[Any] = {}
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Any = [self.cls_token_id]
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE ,token_ids_a=_SCREAMING_SNAKE_CASE ,already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]:
UpperCAmelCase_ : Dict = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ ( self ) -> Optional[Any]:
return len(self.fairseq_ids_to_tokens )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
return self.sp_model.encode(_SCREAMING_SNAKE_CASE ,out_type=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> int:
return self.fairseq_ids_to_tokens[index]
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ''''''.join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE ,''' ''' ).strip()
return out_string
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : Tuple = os.path.join(
_SCREAMING_SNAKE_CASE ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : List[str] = os.path.join(
_SCREAMING_SNAKE_CASE ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE ,'''wb''' ) as fi:
UpperCAmelCase_ : str = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,_SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_SCREAMING_SNAKE_CASE ,'''w''' ,encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(_SCREAMING_SNAKE_CASE )} \n''' )
return out_vocab_file, out_monolingual_vocab_file | 30 |
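# Illustrative sketch (added for clarity, not part of the original file): the
# constructor above builds a fairseq-style vocabulary in two passes -- special
# tokens first, then one token per line of a monolingual vocab file. The helper
# below reproduces that mapping standalone; the file format (token in the first
# whitespace-separated column) is an assumption carried over from the code above.
def build_fairseq_vocab(special_tokens, vocab_lines):
    """Map special tokens to the first ids, then file tokens to the next free ids."""
    token_to_id = {}
    for token in special_tokens:
        if str(token) not in token_to_id:
            token_to_id[str(token)] = len(token_to_id)
    for line in vocab_lines:
        token = line.strip().split()[0]
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    id_to_token = {v: k for k, v in token_to_id.items()}
    return token_to_id, id_to_token

# Example: the special tokens claim ids 0-3, file tokens continue from 4.
tok2id, id2tok = build_fairseq_vocab(["<s>", "<pad>", "</s>", "<unk>"], ["hello 123", "world 456"])
assert tok2id["hello"] == 4 and id2tok[5] == "world"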
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase = '''image_segmenter'''
lowerCAmelCase = CLIPSegForImageSegmentation
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''image''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
with torch.no_grad():
UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : Dict = outputs.cpu().detach().numpy()
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[Any] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) ) | 30 | 1 |
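# Minimal sketch of the decode step above (illustration only): the segmentation
# logits are binarized -- non-positive values become background 0, positive
# values become foreground 1 -- and the 0/1 array is scaled to 0/255 so PIL can
# render a grayscale mask. The random logits stand in for real CLIPSeg output.
import numpy as np
from PIL import Image

logits = np.random.randn(352, 352).astype(np.float32)  # stand-in for model logits
array = logits.copy()
array[array <= 0] = 0  # background
array[array > 0] = 1   # foreground
mask = Image.fromarray((array * 255).astype(np.uint8))
print(mask.size, mask.mode)  # (352, 352) L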
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''megatron-bert'''
def __init__( self ,_SCREAMING_SNAKE_CASE=29_056 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-12 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE="absolute" ,_SCREAMING_SNAKE_CASE=True ,**_SCREAMING_SNAKE_CASE ,) -> Tuple:
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Tuple = type_vocab_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Dict = position_embedding_type
UpperCAmelCase_ : int = use_cache | 30 |
import numpy as np
import datasets
__a = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
} ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
# convert to numpy arrays
UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.cov(reference_distribution.T )
try:
UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
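# The module above defers heavy imports through transformers' internal
# _LazyModule. A minimal sketch of the same pattern using only the standard
# library follows; it illustrates the idea and is not the actual _LazyModule
# implementation (a production version would also map KeyError to AttributeError).
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> module that actually defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module[attr]
        module = importlib.import_module(module_name)  # imported on first access only
        return getattr(module, attr)

# Usage: `json` is only imported the first time `dumps` is touched.
lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"ok": True}))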
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_a )
class __a:
"""simple docstring"""
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
elif titles is None or texts is None:
UpperCAmelCase_ : List[str] = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles]
UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts]
UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
                f'''There should be as many titles as texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' )
UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
UpperCAmelCase_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_ : Dict = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = reader_input['''input_ids''']
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3]
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ )
UpperCAmelCase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
UpperCAmelCase_ : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask'''] | 30 | 1 |
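# Sketch of the span-selection idea behind the best-spans helper above: score
# every (start, end) pair within a length budget, sort by score, and greedily
# keep spans that are not nested inside (or around) an already-chosen span.
# Pure Python with toy logits, for illustration only.
def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # nested spans are skipped, mirroring the check above
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], max_answer_length=2, top_spans=2))
# [(1, 2), (0, 0)]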
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE="divided_space_time" ,_SCREAMING_SNAKE_CASE=None ,) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Dict = num_frames
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = scope
UpperCAmelCase_ : List[str] = num_labels
        # in TimeSformer, the total number of tokens equals num_frames * num_patches per frame + 1 CLS token
UpperCAmelCase_ : str = (image_size // patch_size) ** 2
UpperCAmelCase_ : Dict = (num_frames) * self.num_patches_per_frame + 1
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_labels )
UpperCAmelCase_ : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Any:
UpperCAmelCase_ : List[Any] = TimesformerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,)
UpperCAmelCase_ : Any = self.num_labels
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = TimesformerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ : int = TimesformerForVideoClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
# verify the logits shape
UpperCAmelCase_ : Optional[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ) -> str:
UpperCAmelCase_ : Dict = TimesformerModelTester(self )
UpperCAmelCase_ : str = ConfigTester(
self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> List[Any]:
UpperCAmelCase_ : List[str] = copy.deepcopy(_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def a__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def a__ ( self ) -> Union[str, Any]:
pass
def a__ ( self ) -> Dict:
UpperCAmelCase_, UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE ,nn.Linear ) )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_, UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : str = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> List[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = TimesformerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
if not self.has_attentions:
pass
else:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = self.model_tester.seq_length
UpperCAmelCase_ : Dict = self.model_tester.num_frames
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Dict = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Union[str, Any] = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Any = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 ,len(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : str = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
def a__ ( self ) -> Optional[int]:
def check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : List[Any] = outputs.hidden_states
UpperCAmelCase_ : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
UpperCAmelCase_ : List[Any] = np.load(_lowercase )
return list(_lowercase )
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a__ ( self ) -> List[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def a__ ( self ) -> int:
UpperCAmelCase_ : Any = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : Optional[Any] = prepare_video()
UpperCAmelCase_ : List[str] = image_processor(video[:8] ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase_ : List[str] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 |
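# Quick arithmetic behind the tester's sequence length above: TimeSformer turns
# every frame into (image_size // patch_size)**2 patch tokens and prepends one
# CLS token. With the tester defaults (image_size=10, patch_size=2, num_frames=2):
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 25
seq_length = num_frames * num_patches_per_frame + 1      # 51 tokens
print(num_patches_per_frame, seq_length)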
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = JukeboxTokenizer
lowerCAmelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def a__ ( self ) -> str:
import torch
UpperCAmelCase_ : int = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
UpperCAmelCase_ : List[str] = tokenizer(**self.metas )['''input_ids''']
# fmt: off
UpperCAmelCase_ : List[Any] = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
@require_torch
def a__ ( self ) -> List[str]:
import torch
UpperCAmelCase_ : List[Any] = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
UpperCAmelCase_ : int = tokenizer(**self.metas )['''input_ids''']
# fmt: off
UpperCAmelCase_ : Dict = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) ) | 30 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''wav2vec2'''
def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = feat_extract_norm
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = conv_bias
UpperCAmelCase_ : str = num_conv_pos_embeddings
UpperCAmelCase_ : Any = num_conv_pos_embedding_groups
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : str = hidden_dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = activation_dropout
UpperCAmelCase_ : List[str] = feat_proj_dropout
UpperCAmelCase_ : int = final_dropout
UpperCAmelCase_ : Union[str, Any] = layerdrop
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Optional[int] = do_stable_layer_norm
UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase_ : Any = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : List[str] = feat_quantizer_dropout
UpperCAmelCase_ : Dict = num_negatives
UpperCAmelCase_ : List[str] = codevector_dim
UpperCAmelCase_ : List[str] = proj_codevector_dim
UpperCAmelCase_ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : List[Any] = num_adapter_layers
UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Optional[int] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = xvector_output_dim
@property
def a__ ( self ) -> Any:
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 30 | 1 |
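# The final property above collapses the conv strides into one downsampling
# factor: the feature extractor emits one frame per prod(conv_stride) input
# samples. Sketch of the same arithmetic with the default strides:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the config above
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)            # 320 input samples per output frame
print(16_000 // ratio)  # ~50 frames per second of 16 kHz audio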
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__a = logging.get_logger(__name__)
__a = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = '''bit'''
lowerCAmelCase = ['''preactivation''', '''bottleneck''']
lowerCAmelCase = ['''SAME''', '''VALID''']
def __init__( self ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=[256, 512, 1_024, 2_048] ,_SCREAMING_SNAKE_CASE=[3, 4, 6, 3] ,_SCREAMING_SNAKE_CASE="preactivation" ,_SCREAMING_SNAKE_CASE="relu" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
UpperCAmelCase_ : str = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Dict = embedding_size
UpperCAmelCase_ : Union[str, Any] = hidden_sizes
UpperCAmelCase_ : Optional[int] = depths
UpperCAmelCase_ : Tuple = layer_type
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : int = global_padding
UpperCAmelCase_ : Optional[int] = num_groups
UpperCAmelCase_ : Dict = drop_path_rate
UpperCAmelCase_ : List[Any] = embedding_dynamic_padding
UpperCAmelCase_ : Optional[int] = output_stride
UpperCAmelCase_ : int = width_factor
UpperCAmelCase_ : Tuple = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 ,len(_SCREAMING_SNAKE_CASE ) + 1 )]
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = get_aligned_output_features_output_indices(
out_features=_SCREAMING_SNAKE_CASE ,out_indices=_SCREAMING_SNAKE_CASE ,stage_names=self.stage_names ) | 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict:
UpperCAmelCase_ : List[Any] = tokenizer
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
UpperCAmelCase_ : Optional[int] = n_copies
def __iter__( self ) -> Any:
UpperCAmelCase_ : List[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : str = start_length
UpperCAmelCase_ : Optional[int] = eof_strings
UpperCAmelCase_ : str = tokenizer
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1]
UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase )
UpperCAmelCase_ : Dict = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase )
UpperCAmelCase_ : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCAmelCase_ : List[Any] = '''false'''
if args.num_workers is None:
UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCAmelCase_ : int = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ : Any = tokenizer.eos_token
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase_ : str = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' )
UpperCAmelCase_ : Dict = load_metric('''code_eval''' )
UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
UpperCAmelCase_ : str = args.n_samples // args.batch_size
UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase )
# note: args.batch_size is actually used as num_return_sequences per prompt, not the dataloader batch size
UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase )
UpperCAmelCase_ : int = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
UpperCAmelCase_ : Any = []
for task in tqdm(range(_lowercase ) ):
UpperCAmelCase_ : int = human_eval['''test'''][task]['''test''']
UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(_lowercase , _lowercase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 1 |
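# A minimal, self-contained sketch of how the EOF-string truncation above
# behaves; the sample completion is made up for illustration and reuses the
# same EOF_STRINGS list of top-level statement prefixes.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string):
    # split on any EOF string while keeping the delimiters, then drop the
    # trailing delimiter plus the unfinished block that follows it
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])

completion = "    return a + b\n\nprint(add(1, 2))"
print(repr(remove_last_block(completion)))  # -> '    return a + b\n'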
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''linear'''
lowerCAmelCase = '''cosine'''
lowerCAmelCase = '''cosine_with_restarts'''
lowerCAmelCase = '''polynomial'''
lowerCAmelCase = '''constant'''
lowerCAmelCase = '''constant_with_warmup'''
lowerCAmelCase = '''piecewise_constant'''
def lowerCamelCase__ ( _lowercase , _lowercase = -1 ):
'''simple docstring'''
return LambdaLR(_lowercase , lambda _lowercase : 1 , last_epoch=_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = -1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1.0 , _lowercase ) )
return 1.0
return LambdaLR(_lowercase , _lowercase , last_epoch=_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = -1 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Tuple = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = rule_str.split(''':''' )
UpperCAmelCase_ : List[Any] = int(_lowercase )
UpperCAmelCase_ : Optional[int] = float(_lowercase )
UpperCAmelCase_ : List[str] = value
UpperCAmelCase_ : Optional[Any] = float(rule_list[-1] )
def create_rules_function(_lowercase , _lowercase ):
def rule_func(_lowercase ) -> float:
UpperCAmelCase_ : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowercase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ : Dict = create_rules_function(_lowercase , _lowercase )
return LambdaLR(_lowercase , _lowercase , last_epoch=_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=-1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = 0.5 , _lowercase = -1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
UpperCAmelCase_ : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowercase ) * 2.0 * progress )) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = 1 , _lowercase = -1 ):
'''simple docstring'''
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
UpperCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowercase ) * progress) % 1.0) )) )
return LambdaLR(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=1E-7 , _lowercase=1.0 , _lowercase=-1 ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowercase ):
if current_step < num_warmup_steps:
return float(_lowercase ) / float(max(1 , _lowercase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ : Tuple = lr_init - lr_end
UpperCAmelCase_ : Tuple = num_training_steps - num_warmup_steps
UpperCAmelCase_ : List[str] = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ : str = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowercase , _lowercase , _lowercase )
__a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = 1 , _lowercase = 1.0 , _lowercase = -1 , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = SchedulerType(_lowercase )
UpperCAmelCase_ : Optional[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowercase , last_epoch=_lowercase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowercase , step_rules=_lowercase , last_epoch=_lowercase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowercase , num_warmup_steps=_lowercase , last_epoch=_lowercase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , num_cycles=_lowercase , last_epoch=_lowercase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , power=_lowercase , last_epoch=_lowercase , )
return schedule_func(
_lowercase , num_warmup_steps=_lowercase , num_training_steps=_lowercase , last_epoch=_lowercase ) | 30 |
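# A minimal sketch of the warmup-then-linear-decay shape produced by the
# linear schedule above, using a bare LambdaLR; the step counts and learning
# rate are toy values chosen only to make the printed curve easy to read.
import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
num_warmup_steps, num_training_steps = 3, 10

def lr_lambda(current_step):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(
        0.0,
        float(num_training_steps - current_step)
        / float(max(1, num_training_steps - num_warmup_steps)),
    )

scheduler = LambdaLR(optimizer, lr_lambda)
for step in range(num_training_steps):
    print(step, round(optimizer.param_groups[0]["lr"], 4))  # ramps up, then decays
    optimizer.step()
    scheduler.step()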
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''imagegpt'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : Union[str, Any] = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[Any] = n_head
UpperCAmelCase_ : Union[str, Any] = n_inner
UpperCAmelCase_ : List[Any] = activation_function
UpperCAmelCase_ : List[str] = resid_pdrop
UpperCAmelCase_ : str = embd_pdrop
UpperCAmelCase_ : Optional[Any] = attn_pdrop
UpperCAmelCase_ : Dict = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Dict = scale_attn_weights
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ : Tuple = reorder_and_upcast_attn
UpperCAmelCase_ : int = tie_word_embeddings
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
class __a( _a ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs | 30 | 1 |
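# A standalone sketch of the attribute_map aliasing used by the config above:
# reads of the canonical names are redirected to the model-specific ones.
# TinyConfig is a hypothetical stand-in; the real resolution lives in
# PretrainedConfig, so this mirrors the mechanism only.
class TinyConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=512, n_layer=24):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # only invoked when normal lookup fails, so the aliases resolve here
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

config = TinyConfig()
print(config.hidden_size, config.num_hidden_layers)  # 512 24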
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__a = random.Random()
def lowerCamelCase__ ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
'''simple docstring'''
if rng is None:
UpperCAmelCase_ : int = global_rng
UpperCAmelCase_ : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=2_000 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=16_000 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,) -> Dict:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : List[str] = min_seq_length
UpperCAmelCase_ : Union[str, Any] = max_seq_length
UpperCAmelCase_ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase_ : str = feature_size
UpperCAmelCase_ : Optional[Any] = padding_value
UpperCAmelCase_ : Dict = sampling_rate
UpperCAmelCase_ : List[Any] = return_attention_mask
UpperCAmelCase_ : Optional[int] = do_normalize
def a__ ( self ) -> Optional[int]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ) -> Dict:
def _flatten(_SCREAMING_SNAKE_CASE ):
return list(itertools.chain(*_SCREAMING_SNAKE_CASE ) )
if equal_length:
UpperCAmelCase_ : int = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase_ : Tuple = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ : Tuple = [np.asarray(_SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = WavaVecaFeatureExtractor
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = WavaVecaFeatureExtractionTester(self )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
self.assertTrue(np.all(np.mean(_SCREAMING_SNAKE_CASE ,axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_SCREAMING_SNAKE_CASE ,axis=0 ) - 1 ) < 1e-3 ) )
def a__ ( self ) -> str:
# Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase_ : List[Any] = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = feat_extract(speech_inputs[0] ,return_tensors='''np''' ).input_values
UpperCAmelCase_ : Optional[Any] = feat_extract(np_speech_inputs[0] ,return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# Test batched
UpperCAmelCase_ : Union[str, Any] = feat_extract(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_values
UpperCAmelCase_ : List[Any] = feat_extract(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_ : int = np.asarray(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = feat_extract(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_values
UpperCAmelCase_ : Optional[Any] = feat_extract(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Any = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase_ : Tuple = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase_ : List[str] = [None, 1_600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = feat_extract(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Union[str, Any] = range(800 ,1_400 ,200 )
UpperCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase_ : Any = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase_ : int = [None, 1_600, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[Any] = feat_extract(_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase_ : List[Any] = feat_extract(
_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=1_000 ,padding='''max_length''' ,return_tensors='''np''' )
UpperCAmelCase_ : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase_ : Union[str, Any] = feat_extract(
_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=1_000 ,padding='''longest''' ,return_tensors='''np''' )
UpperCAmelCase_ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
UpperCAmelCase_ : int = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase_ : Optional[Any] = feat_extract(
_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=2_000 ,padding='''longest''' ,return_tensors='''np''' )
UpperCAmelCase_ : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
@require_torch
def a__ ( self ) -> Any:
import torch
UpperCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : List[Any] = np.random.rand(100 ).astype(np.floataa )
UpperCAmelCase_ : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase_ : Optional[Any] = feature_extractor.pad([{'''input_values''': inputs}] ,return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase_ : Union[str, Any] = feature_extractor.pad([{'''input_values''': inputs}] ,return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def a__ ( self ) -> Dict:
# this test makes sure that models using group norm don't have their
# feature extractor return the attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCAmelCase_ : str = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask ,config.feat_extract_norm == '''layer''' ) | 30 |
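# A minimal numpy sketch of the zero-mean / unit-variance property asserted by
# _check_zero_mean_unit_variance above; the normalization formula is the
# standard per-utterance one and the tolerance matches the tests' 1e-3.
import numpy as np

rng = np.random.default_rng(0)
raw = rng.uniform(-1.0, 1.0, size=1_000).astype(np.float32)
normalized = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)

assert abs(normalized.mean()) < 1e-3
assert abs(normalized.var() - 1.0) < 1e-3
print(float(normalized.mean()), float(normalized.var()))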
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__a = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__a = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys to the transformers naming scheme
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide the weights we want to average by the TP degree
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for i, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys to the transformers naming scheme
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide the weights we want to average by the TP degree
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 1 |
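# A standalone sketch of the two tensor-parallel merge rules applied above:
# replicated tensors (layer norms, certain biases) are averaged across ranks,
# while column-parallel linear weights are concatenated along the sharded
# dimension. Shapes and values are toy numbers, not real BLOOM tensors.
import torch

tp_ranks = 4

# replicated parameter, e.g. input_layernorm.weight -> average the copies
ln_shards = [torch.full((8,), float(rank)) for rank in range(tp_ranks)]
ln_merged = sum(ln_shards) / tp_ranks

# sharded parameter, e.g. a ColumnParallelLinear weight split on dim 0 -> concat
col_shards = [torch.randn(2, 8) for _ in range(tp_ranks)]
col_merged = torch.cat(col_shards, dim=0)

print(ln_merged[0].item())  # 1.5, the mean of ranks 0..3
print(col_merged.shape)     # torch.Size([8, 8])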
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : Any = tokenizer(example['''content'''] , truncation=_lowercase )['''input_ids''']
UpperCAmelCase_ : Optional[int] = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
__a = HfArgumentParser(PretokenizationArguments)
__a = parser.parse_args()
if args.num_workers is None:
__a = multiprocessing.cpu_count()
__a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__a = time.time()
__a = load_dataset(args.dataset_name, split='train')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
__a = time.time()
__a = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
__a = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""") | 30 |
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
for i in range(1 , 1001 ):
total += i**i
return str(_lowercase )[-10:]
if __name__ == "__main__":
print(solution()) | 30 | 1 |
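# An equivalent sketch using modular arithmetic: pow(i, i, 10**10) keeps every
# intermediate value below ten digits instead of materializing the full big
# integers; solution_mod is a hypothetical alternative, not part of the file.
def solution_mod():
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)

print(solution_mod())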
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__a = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
__a = parser.parse_args()
__a = 'cpu'
__a = 'a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'
__a = 'path-to-your-trained-model'
__a = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__a = pipe.to(device)
# to channels last
__a = pipe.unet.to(memory_format=torch.channels_last)
__a = pipe.vae.to(memory_format=torch.channels_last)
__a = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__a = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__a = torch.randn(2, 4, 64, 64)
__a = torch.rand(1) * 999
__a = torch.randn(2, 77, 768)
__a = (sample, timestep, encoder_hidden_status)
try:
__a = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__a = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__a = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__a = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__a = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__a = 666
__a = torch.Generator(device).manual_seed(seed)
__a = {'generator': generator}
if args.steps is not None:
__a = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__a = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png') | 30 |
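# A minimal sketch of what the channels_last conversions above actually change:
# only the memory layout (the strides), never the logical shape of the tensor.
import torch

x = torch.randn(2, 4, 64, 64)
y = x.to(memory_format=torch.channels_last)
print(x.shape == y.shape)  # True: same logical shape
print(x.is_contiguous(), y.is_contiguous(memory_format=torch.channels_last))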
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__a = None
__a = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
__a = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def lowerCamelCase__ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : int = image.format
else:
UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_lowercase , format=_lowercase )
return buffer.getvalue()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if hasattr(_lowercase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ : Tuple = array.dtype
UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ : Dict = dtype.kind
UpperCAmelCase_ : Union[str, Any] = dtype.itemsize
UpperCAmelCase_ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : Tuple = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase )
UpperCAmelCase_ : str = np.dtype(_lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) )
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase )
if isinstance(_lowercase , _lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowercase , np.ndarray ):
UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
elif isinstance(_lowercase , PIL.Image.Image ):
UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
else:
return objs
else:
return objs | 30 | 1 |
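# A standalone sketch of the itemsize-halving downcast loop above: starting
# from the array's dtype, keep halving the item size within the same kind
# until a Pillow-compatible dtype is found. VALID is a shortened stand-in for
# _VALID_IMAGE_ARRAY_DTPYES, and downcast is a hypothetical helper name.
import sys

import numpy as np

VALID = {np.dtype("<i4"), np.dtype("<i2"), np.dtype("|u1")}
NATIVE = "<" if sys.byteorder == "little" else ">"

def downcast(dtype):
    kind, itemsize = dtype.kind, dtype.itemsize
    byteorder = dtype.byteorder if dtype.byteorder != "=" else NATIVE
    while itemsize >= 1:
        candidate = np.dtype(byteorder + kind + str(itemsize))
        if candidate in VALID:
            return candidate
        itemsize //= 2
    raise TypeError(f"no valid image dtype for {dtype}")

print(downcast(np.dtype("int64")))  # int32 on a little-endian machine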
from __future__ import annotations
from fractions import Fraction
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : List[Any] = 11
UpperCAmelCase_ : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(_lowercase , _lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowercase , _lowercase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
UpperCAmelCase_ : Dict = 10
return solutions
def lowerCamelCase__ ( _lowercase = 2 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 1.0
for fraction in fraction_list(_lowercase ):
UpperCAmelCase_ : List[str] = Fraction(_lowercase )
result *= frac.denominator / frac.numerator
return int(_lowercase )
if __name__ == "__main__":
print(solution()) | 30 |
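# A quick sketch checking one known digit-cancelling fraction against the
# predicate above: 49/98 keeps its value after naively dropping the shared 9.
from fractions import Fraction

num, den = 49, 98
naively_cancelled = Fraction(num // 10, den % 10)  # "cancel" the common digit 9
print(naively_cancelled == Fraction(num, den))      # True: both equal 1/2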
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 | 1 |
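# A small sketch of turning classification logits like the ones checked above
# into a predicted label index; the values are toy numbers, not real outputs
# of the DiT checkpoint.
import torch

logits = torch.tensor([[-0.4158, -0.4092, -0.4347, 0.9000, -1.2000]])
probs = logits.softmax(dim=-1)
print(probs.argmax(dim=-1).item())  # 3, the highest-scoring class index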
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__a = logging.getLogger()
__a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __a( _a ):
"""simple docstring"""
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
os.makedirs(_SCREAMING_SNAKE_CASE ,exist_ok=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = {'''source''': '''What is love ?''', '''target''': '''life'''}
UpperCAmelCase_ : str = {'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase_ : List[str] = '''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(_SCREAMING_SNAKE_CASE ,f'''{split}.{field}''' ) ,'''w''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = "pytorch" ) -> Optional[int]:
UpperCAmelCase_ : str = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ : Dict = os.path.join(_SCREAMING_SNAKE_CASE ,'''output''' )
UpperCAmelCase_ : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''data''' )
self._create_dummy_data(data_dir=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
UpperCAmelCase_ : Dict = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_SCREAMING_SNAKE_CASE ,env=self.get_env() )
UpperCAmelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''metrics.json''' )
with open(_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : Optional[int] = json.load(_SCREAMING_SNAKE_CASE )
return result
@require_torch_gpu
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] ,0.2 )
@require_torch_multi_gpu
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] ,0.2 )
@require_torch_gpu
@require_ray
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = self._run_finetune(gpus=1 ,distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] ,0.2 )
@require_torch_multi_gpu
@require_ray
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self._run_finetune(gpus=1 ,distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] ,0.2 ) | 30 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 | 1 |
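# A standalone sketch of the deprecation pattern above: the subclass exists
# only to emit a FutureWarning before delegating everything to the replacement
# class. NewProcessor / OldFeatureExtractor are hypothetical names.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=128)  # warns, then behaves like NewProcessor
print(extractor.size)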
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
__a = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
return sd
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=rename_keys_prefix ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = OrderedDict()
UpperCAmelCase_ : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
UpperCAmelCase_ : Tuple = key
for name_pair in rename_keys_prefix:
UpperCAmelCase_ : Optional[int] = new_key.replace(name_pair[0] , name_pair[1] )
UpperCAmelCase_ : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old BERT code didn't have `decoder.bias`; it was added separately
UpperCAmelCase_ : Tuple = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
UpperCAmelCase_ : Dict = '''pretraining'''
if "vcr" in checkpoint_path:
UpperCAmelCase_ : Optional[int] = {'''visual_embedding_dim''': 512}
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ : Any = {'''visual_embedding_dim''': 2048}
elif "vqa" in checkpoint_path:
UpperCAmelCase_ : Any = {'''visual_embedding_dim''': 2048}
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ : str = {'''visual_embedding_dim''': 1024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
UpperCAmelCase_ : str = {'''visual_embedding_dim''': 512}
UpperCAmelCase_ : List[Any] = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
UpperCAmelCase_ : Optional[int] = {'''visual_embedding_dim''': 2048}
UpperCAmelCase_ : List[Any] = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
UpperCAmelCase_ : str = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
UpperCAmelCase_ : Union[str, Any] = '''vqa'''
elif "nlvr" in checkpoint_path:
UpperCAmelCase_ : Union[str, Any] = {
'''visual_embedding_dim''': 1024,
'''num_labels''': 2,
}
UpperCAmelCase_ : Optional[int] = '''nlvr'''
UpperCAmelCase_ : Dict = VisualBertConfig(**_lowercase )
# Load State Dict
UpperCAmelCase_ : Any = load_state_dict(_lowercase )
UpperCAmelCase_ : Any = get_new_dict(_lowercase , _lowercase )
if model_type == "pretraining":
UpperCAmelCase_ : Dict = VisualBertForPreTraining(_lowercase )
elif model_type == "vqa":
UpperCAmelCase_ : str = VisualBertForQuestionAnswering(_lowercase )
elif model_type == "nlvr":
UpperCAmelCase_ : Any = VisualBertForVisualReasoning(_lowercase )
elif model_type == "multichoice":
UpperCAmelCase_ : Union[str, Any] = VisualBertForMultipleChoice(_lowercase )
model.load_state_dict(_lowercase )
# Save Checkpoints
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
__a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path) | 30 |
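# --- Editor's sketch (not part of the conversion script above): how the prefix
# renaming done in get_new_dict behaves on a toy state dict. The helper name and
# the toy keys below are illustrative assumptions, not the script's identifiers.
from collections import OrderedDict

def rename_toy_keys(state_dict, rename_pairs):
    """Apply every (old, new) substring pair to each key, keeping values as-is."""
    renamed = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in rename_pairs:
            new_key = new_key.replace(old, new)
        renamed[new_key] = value
    return renamed

toy_sd = OrderedDict([('projection.weight', 0), ('position_embeddings_visual.weight', 1)])
toy_pairs = [('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection')]
print(list(rename_toy_keys(toy_sd, toy_pairs)))
# ['visual_projection.weight', 'visual_position_embeddings.weight']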
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : List[str] = crop_pct
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Any = do_normalize
UpperCAmelCase_ : str = image_mean
UpperCAmelCase_ : List[Any] = image_std
def a__ ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 30 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 |
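# --- Editor's sketch (not from the test above): turning classification logits
# into a label, the way the RVL-CDIP logits checked above would normally be used.
# The three-way label map below is a hypothetical stand-in for model.config.id2label.
import torch

logits = torch.tensor([[-0.4, 2.1, 0.3]])          # stand-in for outputs.logits
predicted_class = logits.argmax(-1).item()          # index of the highest score
id2label = {0: 'letter', 1: 'invoice', 2: 'memo'}   # hypothetical label map
print(id2label[predicted_class])                    # -> 'invoice'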
import unittest
import numpy as np
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = np.shape(_lowercase )
UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase )
UpperCAmelCase_ : Tuple = np.shape(_lowercase )
if shape_a[0] != shape_b[0]:
UpperCAmelCase_ : Tuple = (
'''Expected the same number of rows for A and B. '''
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(_lowercase )
if shape_b[1] != shape_c[1]:
UpperCAmelCase_ : List[Any] = (
'''Expected the same number of columns for B and C. '''
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(_lowercase )
UpperCAmelCase_ : Dict = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase_ : Any = np.linalg.inv(_lowercase )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] )
UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s )
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> None:
UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 1 |
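# --- Editor's sketch: the determinant identity the tests above rely on, namely
# det([[A, B], [B.T, C]]) == det(A) * det(C - B.T @ inv(A) @ B), checked directly
# on the same matrices the first test uses.
import numpy as np

a = np.array([[1.0, 2, 1], [2, 1, 2], [3, 2, 4]])
b = np.array([[0.0, 3], [3, 0], [2, 3]])
c = np.array([[2.0, 1], [6, 3]])
s = c - b.T @ np.linalg.inv(a) @ b                 # Schur complement of A
block = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(block), np.linalg.det(a) * np.linalg.det(s))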
def lowerCamelCase__ ( _lowercase = 1000 ):
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution()) | 30 |
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
UpperCAmelCase_ : Any = ''''''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
UpperCAmelCase_ : Any = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCAmelCase_ : Union[str, Any] = B'''=''' * ((6 - len(_lowercase ) % 6) // 2)
        # Pad binary_stream with arbitrary binary digits (0s by default) to make
        # its length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
UpperCAmelCase_ : int = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowercase ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Tuple = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase , _lowercase ):
try:
UpperCAmelCase_ : Any = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
UpperCAmelCase_ : str = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase_ : List[Any] = encoded_data[:-padding]
UpperCAmelCase_ : List[Any] = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase_ : Tuple = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
UpperCAmelCase_ : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowercase ) , 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 1 |
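# --- Editor's sketch: a round-trip check against the standard library, assuming
# the encoder and decoder above were named base64_encode and base64_decode (the
# original definitions share an obfuscated name, so those names are assumptions).
import base64

payload = b'any bytes will do'
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload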
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
__a = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
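# --- Editor's sketch: the deferred-import idea behind _LazyModule, reduced to a
# module-level __getattr__ (PEP 562). Illustrative only; this is not the actual
# transformers implementation, and the mapping below covers a single attribute.
import importlib

_LAZY_ATTRS = {'ClapProcessor': '.processing_clap'}   # attribute -> submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)               # import happens on first access
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')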
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We go through a fast BERT tokenizer because there is no slow-to-fast converter for our new tokenizer,
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 1 |
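# --- Editor's sketch: the register-then-load flow exercised by the tests above,
# with hypothetical user classes standing in for CustomConfig/CustomTokenizer.
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class MyConfig(PretrainedConfig):
    model_type = 'my-model'                # hypothetical model type

class MyTokenizer(PreTrainedTokenizer):
    pass                                   # a real tokenizer would implement the vocab API

AutoConfig.register('my-model', MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# AutoTokenizer.from_pretrained(<checkpoint saved with MyTokenizer>) now resolves to MyTokenizer.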
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if edge <= 0 or not isinstance(_lowercase , _lowercase ):
raise ValueError('''Length must be a positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if edge <= 0 or not isinstance(_lowercase , _lowercase ):
raise ValueError('''Length must be a positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 |
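# --- Editor's note: the two formulas above are the surface area and volume of a
# regular dodecahedron with the given edge length. A quick worked check (the
# function names in the module are obfuscated, so the math is repeated inline):
import math

edge = 2
area = 3 * math.sqrt(25 + 10 * math.sqrt(5)) * edge**2      # ~82.58
volume = (15 + 7 * math.sqrt(5)) / 4 * edge**3               # ~61.30
print(round(area, 2), round(volume, 2))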
from functools import reduce
__a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase__ ( _lowercase = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _lowercase , _lowercase : str(int(_lowercase ) * int(_lowercase ) ) , n[i : i + 13] ) )
for i in range(len(_lowercase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
UpperCAmelCase_ : Tuple = precision
UpperCAmelCase_ : Optional[Any] = ceil(precision / 14 )
UpperCAmelCase_ : int = 426880 * Decimal(10005 ).sqrt()
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : List[Any] = 13591409
UpperCAmelCase_ : Optional[Any] = Decimal(_lowercase )
for k in range(1 , _lowercase ):
UpperCAmelCase_ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowercase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__a = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = GPTaTokenizer
lowerCAmelCase = GPTaTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = {'''add_prefix_space''': True}
lowerCAmelCase = False
def a__ ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
UpperCAmelCase_ : Optional[Any] = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase_ : Optional[int] = {'''unk_token''': '''<unk>'''}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> str:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> str:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : Optional[int] = '''lower newer'''
UpperCAmelCase_ : List[str] = '''lower newer'''
return input_text, output_text
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : List[str] = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : int = '''lower newer'''
UpperCAmelCase_ : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''lower newer'''
# Testing tokenization
UpperCAmelCase_ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
UpperCAmelCase_ : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
UpperCAmelCase_ : str = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Testing the unknown token
UpperCAmelCase_ : Dict = tokens + [rust_tokenizer.unk_token]
UpperCAmelCase_ : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
def a__ ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
        # It's very difficult to mix/test pretokenization with byte-level encoding
        # and to get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def a__ ( self ,_SCREAMING_SNAKE_CASE=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
# Simple input
UpperCAmelCase_ : str = '''This is a simple input'''
UpperCAmelCase_ : str = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : int = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,)
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,)
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='''<pad>''' )
# Simple input
UpperCAmelCase_ : str = '''This is a simple input'''
UpperCAmelCase_ : Tuple = ['''This is a simple input looooooooong''', '''This is a simple input''']
UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Optional[Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
UpperCAmelCase_ : Union[str, Any] = tokenizer.pad_token_id
UpperCAmelCase_ : Dict = tokenizer(_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,max_length=30 ,return_tensors='''np''' )
UpperCAmelCase_ : int = tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncate=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : Tuple = tokenizer(*_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,max_length=60 ,return_tensors='''np''' )
UpperCAmelCase_ : List[Any] = tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncate=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def a__ ( self ) -> Any:
UpperCAmelCase_ : List[Any] = '''$$$'''
UpperCAmelCase_ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=_SCREAMING_SNAKE_CASE ,add_bos_token=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = '''This is a simple input'''
UpperCAmelCase_ : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : List[Any] = tokenizer.bos_token_id
UpperCAmelCase_ : List[str] = tokenizer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = tokenizer(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] ,_SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
UpperCAmelCase_ : Tuple = tokenizer.decode(out_s.input_ids )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,_SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def a__ ( self ) -> str:
pass
def a__ ( self ) -> int:
# TODO: change to self.get_tokenizers() when the fast version is implemented
UpperCAmelCase_ : Tuple = [self.get_tokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ,add_bos_token=_SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ : List[Any] = '''Encode this.'''
UpperCAmelCase_ : List[Any] = '''This one too please.'''
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = tokenizer.encode_plus(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,return_special_tokens_mask=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : int = encoded_sequence_dict['''input_ids''']
UpperCAmelCase_ : Union[str, Any] = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_SCREAMING_SNAKE_CASE )
]
UpperCAmelCase_ : str = [x for x in filtered_sequence if x is not None]
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> List[Any]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = '''A photo of a cat'''
UpperCAmelCase_ : Any = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
self.assertEqual(_SCREAMING_SNAKE_CASE ,[2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('''test_opt''' )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''./test_opt''' )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
self.assertEqual(_SCREAMING_SNAKE_CASE ,[2, 250, 1_345, 9, 10, 4_758] )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,use_slow=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''A photo of a cat'''
UpperCAmelCase_ : int = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
# Same as above
self.assertEqual(_SCREAMING_SNAKE_CASE ,[2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' ,from_slow=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = '''bos'''
UpperCAmelCase_ : Dict = tokenizer.get_vocab()['''bos''']
UpperCAmelCase_ : Optional[int] = '''A photo of a cat'''
UpperCAmelCase_ : Any = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
# We changed the bos token
self.assertEqual(_SCREAMING_SNAKE_CASE ,[31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('''./tok''' )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
UpperCAmelCase_ : Tuple = tokenizer.encode(
_SCREAMING_SNAKE_CASE ,)
self.assertEqual(_SCREAMING_SNAKE_CASE ,[31_957, 250, 1_345, 9, 10, 4_758] ) | 30 |
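# --- Editor's note: the add_prefix_space behaviour the tests above exercise, in
# one hedged example (identifiers are from the public transformers API; the token
# strings are from the GPT-2 byte-level vocabulary, where \u0120 marks a space):
#   tok = GPT2TokenizerFast.from_pretrained('gpt2', add_prefix_space=True)
#   tok.tokenize('lower newer')  ->  tokens like ['\u0120lower', '\u0120newer']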
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not all(isinstance(_lowercase , (float, int) ) for val in locals().values() ):
UpperCAmelCase_ : Optional[int] = f'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(_lowercase )
UpperCAmelCase_ : Tuple = ((x * distance) / (z + distance)) * scale
UpperCAmelCase_ : str = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''Axis must be a str''' )
UpperCAmelCase_ : Optional[Any] = locals()
del input_variables["axis"]
if not all(isinstance(_lowercase , (float, int) ) for val in input_variables.values() ):
UpperCAmelCase_ : List[Any] = (
'''Input values except axis must either be float or int: '''
f'''{list(input_variables.values() )}'''
)
raise TypeError(_lowercase )
UpperCAmelCase_ : Dict = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
UpperCAmelCase_ : Optional[int] = x * math.cos(_lowercase ) - y * math.sin(_lowercase )
UpperCAmelCase_ : List[Any] = y * math.cos(_lowercase ) + x * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = z
elif axis == "x":
UpperCAmelCase_ : Any = y * math.cos(_lowercase ) - z * math.sin(_lowercase )
UpperCAmelCase_ : int = z * math.cos(_lowercase ) + y * math.sin(_lowercase )
UpperCAmelCase_ : Dict = x
elif axis == "y":
UpperCAmelCase_ : Union[str, Any] = x * math.cos(_lowercase ) - z * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = z * math.cos(_lowercase ) + x * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__a = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 1 |
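# Example usage of the public API re-exported above (a sketch: assumes the
# package is installed and the Hugging Face Hub is reachable; the dataset
# name "rotten_tomatoes" is only an illustrative choice):
if __name__ == "__main__":
    from datasets import concatenate_datasets, load_dataset

    train = load_dataset("rotten_tomatoes", split="train")
    test = load_dataset("rotten_tomatoes", split="test")
    print(concatenate_datasets([train, test]))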
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase = None
def _generate_iterable_examples( df , partition_order , ):
'''simple docstring'''
import pyspark
def generate_fn():
UpperCAmelCase_ : Optional[int] = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
UpperCAmelCase_ : Union[str, Any] = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' )
UpperCAmelCase_ : int = partition_df.collect()
UpperCAmelCase_ : str = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,) -> Dict:
UpperCAmelCase_ : str = df
UpperCAmelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCAmelCase_ : str = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ) -> Dict:
yield from self.generate_examples_fn()
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
UpperCAmelCase_ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_SCREAMING_SNAKE_CASE )
return SparkExamplesIterable(self.df ,partition_order=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
UpperCAmelCase_ : Dict = self.split_shard_indices_by_worker(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return SparkExamplesIterable(self.df ,partition_order=_SCREAMING_SNAKE_CASE )
@property
def a__ ( self ) -> int:
return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
"""simple docstring"""
lowerCAmelCase = SparkConfig
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
import pyspark
UpperCAmelCase_ : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCAmelCase_ : Union[str, Any] = df
UpperCAmelCase_ : Optional[Any] = working_dir
super().__init__(
cache_dir=_SCREAMING_SNAKE_CASE ,config_name=str(self.df.semanticHash() ) ,**_SCREAMING_SNAKE_CASE ,)
def a__ ( self ) -> int:
# Returns the path of the created file.
def create_cache_and_write_probe(_SCREAMING_SNAKE_CASE ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : List[str] = os.path.join(self._cache_dir ,'''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_SCREAMING_SNAKE_CASE ,'''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' ,'''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCAmelCase_ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_SCREAMING_SNAKE_CASE ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def a__ ( self ) -> Tuple:
return datasets.DatasetInfo(features=self.config.features )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
import pyspark
def get_arrow_batch_size(_SCREAMING_SNAKE_CASE ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
UpperCAmelCase_ : Union[str, Any] = self.df.count()
UpperCAmelCase_ : Tuple = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase_ : Tuple = (
self.df.limit(_SCREAMING_SNAKE_CASE )
.repartition(1 )
.mapInArrow(_SCREAMING_SNAKE_CASE ,'''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase_ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase_ : List[Any] = min(_SCREAMING_SNAKE_CASE ,int(approx_total_size / max_shard_size ) )
UpperCAmelCase_ : List[str] = self.df.repartition(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
UpperCAmelCase_ : Dict = ParquetWriter if file_format == '''parquet''' else ArrowWriter
UpperCAmelCase_ : Dict = os.path.join(self._working_dir ,os.path.basename(_SCREAMING_SNAKE_CASE ) ) if self._working_dir else fpath
UpperCAmelCase_ : List[Any] = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase_ : Any = self.config.features
UpperCAmelCase_ : Any = self._writer_batch_size
UpperCAmelCase_ : Optional[Any] = self._fs.storage_options
def write_arrow(_SCREAMING_SNAKE_CASE ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase_ : List[str] = next(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Optional[int] = writer_class(
features=_SCREAMING_SNAKE_CASE ,path=working_fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,writer_batch_size=_SCREAMING_SNAKE_CASE ,storage_options=_SCREAMING_SNAKE_CASE ,embed_local_files=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : str = pa.Table.from_batches([first_batch] )
writer.write_table(_SCREAMING_SNAKE_CASE )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
shard_id += 1
UpperCAmelCase_ : Optional[int] = writer_class(
features=writer._features ,path=working_fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,writer_batch_size=_SCREAMING_SNAKE_CASE ,storage_options=_SCREAMING_SNAKE_CASE ,embed_local_files=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(_SCREAMING_SNAKE_CASE )
if writer._num_bytes > 0:
UpperCAmelCase_, UpperCAmelCase_ : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Dict = os.path.join(os.path.dirname(_SCREAMING_SNAKE_CASE ) ,os.path.basename(_SCREAMING_SNAKE_CASE ) )
shutil.move(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = (
self.df.mapInArrow(_SCREAMING_SNAKE_CASE ,'''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) ,pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) ,pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) ,pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = "arrow" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> int:
self._validate_cache_dir()
UpperCAmelCase_ : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = not is_remote_filesystem(self._fs )
UpperCAmelCase_ : Dict = os.path.join if is_local else posixpath.join
UpperCAmelCase_ : str = '''-TTTTT-SSSSS-of-NNNNN'''
UpperCAmelCase_ : List[Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
UpperCAmelCase_ : Optional[Any] = path_join(self._output_dir ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Dict = []
for task_id, content in self._prepare_split_single(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = total_num_examples
UpperCAmelCase_ : str = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
UpperCAmelCase_ : str = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase_ : List[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,):
rename(
_SCREAMING_SNAKE_CASE ,fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,fpath.replace('''TTTTT-SSSSS''' ,f'''{global_shard_id:05d}''' ).replace('''NNNNN''' ,f'''{total_shards:05d}''' ) ,)
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Any = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_, UpperCAmelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(_SCREAMING_SNAKE_CASE ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_SCREAMING_SNAKE_CASE ,len(_SCREAMING_SNAKE_CASE ) ).map(lambda _SCREAMING_SNAKE_CASE : _rename_shard(*_SCREAMING_SNAKE_CASE ) ).collect()
else:
# don't use any pattern
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : List[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,fpath.replace(_SCREAMING_SNAKE_CASE ,'''''' ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df ) | 30 |
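# Sketch of how this builder is normally reached through the public API
# (illustrative; assumes pyspark is installed and a local Spark session can
# start -- `Dataset.from_spark` routes through the Spark builder defined above):
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    from datasets import Dataset

    spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], schema="text string")
    ds = Dataset.from_spark(df)
    print(ds[0])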
def gcd(a, b):
    '''Greatest common divisor via the iterative Euclidean algorithm.'''
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a, m):
    '''Modular multiplicative inverse of ``a`` modulo ``m`` (extended Euclid).'''
    if gcd(a, m) != 1:
        raise ValueError(f'mod inverse of {a!r} and {m!r} does not exist')
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
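# Quick sanity check (an illustrative addition, not part of the original
# module): 3 * 5 = 15 = 2 * 7 + 1, so 5 is the inverse of 3 modulo 7.
if __name__ == "__main__":
    assert gcd(3, 7) == 1
    assert mod_inverse(3, 7) == 5
    print(f"{mod_inverse(3, 7) = }")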
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
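# Illustrative round trip with the classes exported above (a sketch: assumes
# torch is installed and the facebook/encodec_24khz weights can be fetched
# from the Hub; one second of silence stands in for real audio):
if __name__ == "__main__":
    import torch
    from transformers import EncodecFeatureExtractor, EncodecModel

    model = EncodecModel.from_pretrained("facebook/encodec_24khz")
    extractor = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
    inputs = extractor(raw_audio=torch.zeros(24_000).numpy(), sampling_rate=24_000, return_tensors="pt")
    encoded = model.encode(inputs["input_values"])  # quantized codebook indices
    audio_values = model.decode(encoded.audio_codes, encoded.audio_scales)[0]
    print(audio_values.shape)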
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
"""simple docstring"""
lowerCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase = '''image_segmenter'''
lowerCAmelCase = CLIPSegForImageSegmentation
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''image''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
with torch.no_grad():
UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : Dict = outputs.cpu().detach().numpy()
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[Any] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
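# Illustrative usage sketch (the local file "cats.png" is hypothetical, and
# the CIDAS/clipseg-rd64-refined checkpoint is downloaded on first use):
if __name__ == "__main__":
    from PIL import Image

    segmenter = ImageSegmentationTool()
    image = Image.open("cats.png")
    mask = segmenter(image=image, label="cat")
    mask.save("cat_mask.png")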
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    '''Project Euler 99: return the 1-indexed line whose base,exponent pair
    yields the greatest value base**exponent, compared in the log domain.'''
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        # x * log10(a) orders the same as a**x, without huge intermediate numbers.
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution()) | 30 |
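# A hand-checkable illustration of the log-domain comparison used above (an
# illustrative addition): 2**11 = 2048 < 3**7 = 2187, and comparing
# 11*log10(2) with 7*log10(3) agrees without evaluating either power.
if __name__ == "__main__":
    assert 11 * log10(2) < 7 * log10(3)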
import numpy as np
import datasets
__a = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
} ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
# convert to numpy arrays
UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.cov(reference_distribution.T )
try:
UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 30 | 1 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    '''Return True if the adjacency-list graph is bipartite (2-colourable).'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 30 |
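# A negative example for contrast (illustrative addition): a triangle contains
# an odd cycle, so it cannot be 2-coloured and the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))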
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
"""simple docstring"""
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
elif titles is None or texts is None:
UpperCAmelCase_ : List[str] = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles]
UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts]
UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
                f'''There should be as many titles as texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' )
UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
UpperCAmelCase_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_ : Dict = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = reader_input['''input_ids''']
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3]
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ )
UpperCAmelCase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
UpperCAmelCase_ : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask'''] | 30 | 1 |
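# Illustrative use of the reader tokenizer defined above (a sketch: assumes
# transformers is installed and the facebook/dpr-reader-single-nq-base
# checkpoint is reachable on the Hub):
if __name__ == "__main__":
    from transformers import DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = tokenizer(
        questions="What is love?",
        titles="Haddaway",
        texts="'What Is Love' is a song recorded by the artist Haddaway",
        return_tensors="pt",
    )
    print(encoded["input_ids"].shape)  # (n_passages, sequence_length)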
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['model.decoder.embed_positions.weights']
def rename_keys( _lowercase ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase_ : str = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
UpperCAmelCase_ : List[Any] = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
UpperCAmelCase_ : Any = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
UpperCAmelCase_ : Tuple = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
UpperCAmelCase_ : Any = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
UpperCAmelCase_ : Any = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
UpperCAmelCase_ : int = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase_ : str = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def rename_state_dict( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = list(state_dict.keys() )
UpperCAmelCase_ : Any = {}
for key in keys:
UpperCAmelCase_ : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase_ : str = rename_keys(_lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase_ : Union[str, Any] = val[:hidden_size, :]
UpperCAmelCase_ : List[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase_ : Tuple = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase_ : Union[str, Any] = val
else:
UpperCAmelCase_ : int = val
return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( _lowercase ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase_ : str = 1024
UpperCAmelCase_ : int = 24
UpperCAmelCase_ : Any = 16
elif checkpoint == "medium":
UpperCAmelCase_ : Optional[int] = 1536
UpperCAmelCase_ : Optional[Any] = 48
UpperCAmelCase_ : List[Any] = 24
elif checkpoint == "large":
UpperCAmelCase_ : Tuple = 2048
UpperCAmelCase_ : Any = 48
UpperCAmelCase_ : List[Any] = 32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
UpperCAmelCase_ : str = MusicgenDecoderConfig(
hidden_size=_lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowercase , num_attention_heads=_lowercase , )
return config
@torch.no_grad()
def convert_musicgen_checkpoint( _lowercase , _lowercase=None , _lowercase=None , _lowercase="cpu" ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = MusicGen.get_pretrained(_lowercase , device=_lowercase )
UpperCAmelCase_ : Union[str, Any] = decoder_config_from_checkpoint(_lowercase )
UpperCAmelCase_ : Union[str, Any] = fairseq_model.lm.state_dict()
UpperCAmelCase_, UpperCAmelCase_ : Tuple = rename_state_dict(
_lowercase , hidden_size=decoder_config.hidden_size )
    UpperCAmelCase_ : Tuple = T5EncoderModel.from_pretrained('''t5-base''' )
UpperCAmelCase_ : int = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
UpperCAmelCase_ : Dict = MusicgenForCausalLM(_lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase_, UpperCAmelCase_ : Any = decoder.load_state_dict(_lowercase , strict=_lowercase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowercase )
if len(_lowercase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(_lowercase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
UpperCAmelCase_ : str = MusicgenForConditionalGeneration(text_encoder=_lowercase , audio_encoder=_lowercase , decoder=_lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowercase )
# check we can do a forward pass
UpperCAmelCase_ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase_ : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase_ : Any = model(input_ids=_lowercase , decoder_input_ids=_lowercase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''t5-base''' )
UpperCAmelCase_ : int = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
UpperCAmelCase_ : Dict = MusicgenProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
# set the appropriate bos/pad token ids
UpperCAmelCase_ : Dict = 2048
UpperCAmelCase_ : List[Any] = 2048
# set other default generation config params
UpperCAmelCase_ : int = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Optional[Any] = 3.0
if pytorch_dump_folder is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(_lowercase )
processor.push_to_hub(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub) | 30 |
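# Sketch of consuming a converted checkpoint (illustrative; the folder name
# "./musicgen-small-converted" is hypothetical and stands for whatever
# --pytorch_dump_folder was passed above):
def _example_generation(dump_folder="./musicgen-small-converted"):
    """Load a converted checkpoint and sample a short audio continuation."""
    from transformers import MusicgenForConditionalGeneration, MusicgenProcessor

    processor = MusicgenProcessor.from_pretrained(dump_folder)
    model = MusicgenForConditionalGeneration.from_pretrained(dump_folder)
    inputs = processor(text=["80s pop track with punchy drums"], padding=True, return_tensors="pt")
    # ~256 new tokens corresponds to roughly five seconds at the 50 Hz frame rate.
    return model.generate(**inputs, do_sample=True, max_new_tokens=256)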
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=99 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,) -> Tuple:
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : Any = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Union[str, Any] = use_token_type_ids
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : Tuple = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : List[str] = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : List[str] = scope
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Dict = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> Optional[Any]:
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : Optional[int] = LlamaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Dict = LlamaModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,encoder_hidden_states=_SCREAMING_SNAKE_CASE ,encoder_attention_mask=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Dict = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,encoder_hidden_states=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Dict:
UpperCAmelCase_ : int = LlamaForCausalLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = LlamaForCausalLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
UpperCAmelCase_ : int = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,encoder_hidden_states=_SCREAMING_SNAKE_CASE ,encoder_attention_mask=_SCREAMING_SNAKE_CASE ,use_cache=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Optional[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_ : int = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([input_mask, next_mask] ,dim=-1 )
UpperCAmelCase_ : str = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,encoder_hidden_states=_SCREAMING_SNAKE_CASE ,encoder_attention_mask=_SCREAMING_SNAKE_CASE ,output_hidden_states=_SCREAMING_SNAKE_CASE ,)['''hidden_states'''][0]
UpperCAmelCase_ : str = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,encoder_hidden_states=_SCREAMING_SNAKE_CASE ,encoder_attention_mask=_SCREAMING_SNAKE_CASE ,past_key_values=_SCREAMING_SNAKE_CASE ,output_hidden_states=_SCREAMING_SNAKE_CASE ,)['''hidden_states'''][0]
# select random slice
UpperCAmelCase_ : str = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase = (LlamaForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = LlamaModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : List[str] = type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_, UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : Optional[Any] = input_dict['''input_ids''']
UpperCAmelCase_ : List[Any] = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : int = LlamaForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : str = '''single_label_classification'''
UpperCAmelCase_ : List[Any] = input_dict['''input_ids''']
UpperCAmelCase_ : Tuple = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : str = LlamaForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> int:
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : Tuple = '''multi_label_classification'''
UpperCAmelCase_ : Optional[int] = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : str = LlamaForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def a__ ( self ) -> Union[str, Any]:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_, UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : str = ids_tensor([1, 10] ,config.vocab_size )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : List[str] = LlamaModel(_SCREAMING_SNAKE_CASE )
original_model.to(_SCREAMING_SNAKE_CASE )
original_model.eval()
UpperCAmelCase_ : Any = original_model(_SCREAMING_SNAKE_CASE ).last_hidden_state
UpperCAmelCase_ : Optional[int] = original_model(_SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : Optional[Any] = {'''type''': scaling_type, '''factor''': 10.0}
UpperCAmelCase_ : Any = LlamaModel(_SCREAMING_SNAKE_CASE )
scaled_model.to(_SCREAMING_SNAKE_CASE )
scaled_model.eval()
UpperCAmelCase_ : str = scaled_model(_SCREAMING_SNAKE_CASE ).last_hidden_state
UpperCAmelCase_ : Optional[Any] = scaled_model(_SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-5 ) )
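# Illustration of the `rope_scaling` configurations exercised by the test
# above (a sketch -- the tiny hyper-parameters are arbitrary and only keep the
# model cheap to build):
def _example_rope_scaled_model(scaling_type="linear"):
    """Build a small LlamaModel whose RoPE positions are scaled by a factor of 2."""
    config = LlamaConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        max_position_embeddings=512,
        rope_scaling={"type": scaling_type, "factor": 2.0},
    )
    return LlamaModel(config)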
@require_torch
class LlamaIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' )
UpperCAmelCase_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
UpperCAmelCase_ : Optional[int] = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) ,_SCREAMING_SNAKE_CASE ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ : List[Any] = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_SCREAMING_SNAKE_CASE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same; once we fix the instabilities, we will update!''' )
@slow
def a__ ( self ) -> str:
UpperCAmelCase_ : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' )
UpperCAmelCase_ : Union[str, Any] = model(torch.tensor(_SCREAMING_SNAKE_CASE ) )
# Expected mean on dim = -1
UpperCAmelCase_ : Optional[int] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) ,_SCREAMING_SNAKE_CASE ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ : List[Any] = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_SCREAMING_SNAKE_CASE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same; once we fix the instabilities, we will update!''' )
@slow
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' )
UpperCAmelCase_ : Any = model(torch.tensor(_SCREAMING_SNAKE_CASE ) )
# Expected mean on dim = -1
UpperCAmelCase_ : List[Any] = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) ,_SCREAMING_SNAKE_CASE ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ : Dict = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,_SCREAMING_SNAKE_CASE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip(
        '''Logits are not exactly the same; once we fix the instabilities, we will update! It is also going to be a `too_slow` test.''' )
@slow
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ : str = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' )
UpperCAmelCase_ : Union[str, Any] = model(torch.tensor(_SCREAMING_SNAKE_CASE ) )
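        # Expected mean on dim = -1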
UpperCAmelCase_ : int = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_SCREAMING_SNAKE_CASE ,atol=1e-2 ,rtol=1e-2 )
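        # slicing logits[0, 0, 0:30]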
# fmt: off
UpperCAmelCase_ : List[Any] = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_SCREAMING_SNAKE_CASE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
UpperCAmelCase_ : List[Any] = '''Simply put, the theory of relativity states that '''
UpperCAmelCase_ : int = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
UpperCAmelCase_ : int = tokenizer.encode(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
UpperCAmelCase_ : List[str] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=_SCREAMING_SNAKE_CASE )
# greedy generation outputs
UpperCAmelCase_ : Optional[int] = model.generate(_SCREAMING_SNAKE_CASE ,max_new_tokens=64 ,top_p=_SCREAMING_SNAKE_CASE ,temperature=1 ,do_sample=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) | 30 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''wav2vec2'''
def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = feat_extract_norm
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = conv_bias
UpperCAmelCase_ : str = num_conv_pos_embeddings
UpperCAmelCase_ : Any = num_conv_pos_embedding_groups
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : str = hidden_dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = activation_dropout
UpperCAmelCase_ : List[str] = feat_proj_dropout
UpperCAmelCase_ : int = final_dropout
UpperCAmelCase_ : Union[str, Any] = layerdrop
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Optional[int] = do_stable_layer_norm
UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase_ : Any = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : List[str] = feat_quantizer_dropout
UpperCAmelCase_ : Dict = num_negatives
UpperCAmelCase_ : List[str] = codevector_dim
UpperCAmelCase_ : List[str] = proj_codevector_dim
UpperCAmelCase_ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : List[Any] = num_adapter_layers
UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Optional[int] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = xvector_output_dim
@property
def a__ ( self ) -> Any:
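        # Overall downsampling factor of the convolutional feature encoder:
        # the product of all conv strides, i.e. input samples per output frame.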
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 30 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = '''ylacombe/bark-small'''
UpperCAmelCase_ : int = tempfile.mkdtemp()
UpperCAmelCase_ : Union[str, Any] = '''en_speaker_1'''
UpperCAmelCase_ : List[str] = '''This is a test string'''
UpperCAmelCase_ : Any = '''speaker_embeddings_path.json'''
UpperCAmelCase_ : List[str] = '''speaker_embeddings'''
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> str:
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = BarkProcessor(tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase_ : Tuple = 35
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[str] = 8
UpperCAmelCase_ : List[Any] = {
'''semantic_prompt''': np.ones(_SCREAMING_SNAKE_CASE ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : Dict = processor(text=self.input_string ,voice_preset=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(_SCREAMING_SNAKE_CASE ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,'''file.npz''' )
np.savez(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(_SCREAMING_SNAKE_CASE ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Dict = processor(text=self.input_string ,voice_preset=self.voice_preset )
def a__ ( self ) -> str:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Any = BarkProcessor(tokenizer=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = processor(text=self.input_string )
UpperCAmelCase_ : Any = tokenizer(
self.input_string ,padding='''max_length''' ,max_length=256 ,add_special_tokens=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,return_token_type_ids=_SCREAMING_SNAKE_CASE ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() ) | 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
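# Expose the NllbTokenizer classes lazily: each backend-specific class is only
# registered if its optional dependency (sentencepiece / tokenizers) is installed.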
__a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = ShapEImgaImgPipeline
lowerCAmelCase = ['''image''']
lowerCAmelCase = ['''image''']
lowerCAmelCase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase = False
@property
def a__ ( self ) -> Optional[int]:
return 32
@property
def a__ ( self ) -> str:
return 32
@property
def a__ ( self ) -> Any:
return self.time_input_dim * 4
@property
def a__ ( self ) -> Tuple:
return 8
@property
def a__ ( self ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
UpperCAmelCase_ : str = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : List[Any] = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,resample=3 ,size=224 ,)
return image_processor
@property
def a__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
UpperCAmelCase_ : str = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def a__ ( self ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase_ : List[str] = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = self.dummy_prior
UpperCAmelCase_ : str = self.dummy_image_encoder
UpperCAmelCase_ : Union[str, Any] = self.dummy_image_processor
UpperCAmelCase_ : Tuple = self.dummy_renderer
UpperCAmelCase_ : Optional[Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_024 ,prediction_type='''sample''' ,use_karras_sigmas=_SCREAMING_SNAKE_CASE ,clip_sample=_SCREAMING_SNAKE_CASE ,clip_sample_range=1.0 ,)
UpperCAmelCase_ : Optional[int] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=0 ) -> Any:
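        # Deterministic dummy inputs: a seeded random RGB image plus a seeded
        # generator, so the pipeline output is reproducible across runs.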
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
UpperCAmelCase_ : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : List[str] = '''cpu'''
UpperCAmelCase_ : int = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Optional[Any] = output.images[0]
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self ) -> int:
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = torch_device == '''cpu'''
UpperCAmelCase_ : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=_SCREAMING_SNAKE_CASE ,relax_max_difference=_SCREAMING_SNAKE_CASE ,)
def a__ ( self ) -> str:
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase_ : int = batch_size * [inputs[key]]
UpperCAmelCase_ : int = pipe(**_SCREAMING_SNAKE_CASE ,num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
UpperCAmelCase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
UpperCAmelCase_ : str = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
UpperCAmelCase_ : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCAmelCase_ : str = pipe(
_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='''np''' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) | 30 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict:
UpperCAmelCase_ : List[Any] = tokenizer
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
UpperCAmelCase_ : Optional[int] = n_copies
def __iter__( self ) -> Any:
UpperCAmelCase_ : List[Any] = []
for task in range(self.n_tasks ):
            # without strip(), the model generates commented-out code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : str = start_length
UpperCAmelCase_ : Optional[int] = eof_strings
UpperCAmelCase_ : str = tokenizer
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase )
    # when generation stops exactly at a stop string, the last element is ""; drop it together with the stop string itself
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1]
UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase )
UpperCAmelCase_ : Dict = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase )
UpperCAmelCase_ : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL
    # make sure the tokenizer plays nicely with multiprocessing
UpperCAmelCase_ : List[Any] = '''false'''
if args.num_workers is None:
UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count()
    # Use dataset loading to feed the prompts to accelerate
UpperCAmelCase_ : int = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ : Any = tokenizer.eos_token
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase_ : str = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' )
UpperCAmelCase_ : Dict = load_metric('''code_eval''' )
UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
UpperCAmelCase_ : str = args.n_samples // args.batch_size
UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase )
    # do not confuse args.batch_size with the dataloader batch size: it is actually num_return_sequences
UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase )
UpperCAmelCase_ : int = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
UpperCAmelCase_ : Any = []
for task in tqdm(range(_lowercase ) ):
UpperCAmelCase_ : int = human_eval['''test'''][task]['''test''']
UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(_lowercase , _lowercase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__a = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__a = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__a = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__a = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__a = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Optional[Any]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) ,homepage='''https://github.com/openai/human-eval''' ,codebase_urls=['''https://github.com/openai/human-eval'''] ,reference_urls=['''https://github.com/openai/human-eval'''] ,license=_LICENSE ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=[1, 10, 100] ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=3.0 ) -> int:
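        # Refuse to run unless the user has explicitly opted in to executing
        # untrusted, model-generated code (see the warning above).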
if os.getenv('''HF_ALLOW_CODE_EVAL''' ,0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=_SCREAMING_SNAKE_CASE ) as executor:
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : List[Any] = Counter()
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Union[str, Any] = defaultdict(_SCREAMING_SNAKE_CASE )
for task_id, (candidates, test_case) in enumerate(zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ):
for candidate in candidates:
UpperCAmelCase_ : Union[str, Any] = candidate + '''\n''' + test_case
UpperCAmelCase_ : Any = (test_program, timeout, task_id, completion_id[task_id])
UpperCAmelCase_ : List[str] = executor.submit(_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE )
futures.append(_SCREAMING_SNAKE_CASE )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
UpperCAmelCase_, UpperCAmelCase_ : str = [], []
for result in results.values():
result.sort()
UpperCAmelCase_ : Dict = [r[1]['''passed'''] for r in result]
total.append(len(_SCREAMING_SNAKE_CASE ) )
correct.append(sum(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = k
UpperCAmelCase_ : str = {f'''pass@{k}''': estimate_pass_at_k(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
def estimator(_lowercase , _lowercase , _lowercase ) -> float:
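        # Unbiased pass@k estimator from "Evaluating Large Language Models Trained on Code"
        # (https://arxiv.org/abs/2107.03374): 1 - C(n - c, k) / C(n, k), evaluated as a
        # numerically stable product. Worked example: n=5, c=2, k=1 gives
        # 1 - (3/4) * (4/5) = 0.4, which equals c/n as expected for k=1.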
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Union[str, Any] = itertools.repeat(_lowercase , len(_lowercase ) )
else:
assert len(_lowercase ) == len(_lowercase )
UpperCAmelCase_ : Optional[Any] = iter(_lowercase )
return np.array([estimator(int(_lowercase ) , int(_lowercase ) , _lowercase ) for n, c in zip(_lowercase , _lowercase )] ) | 30 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''imagegpt'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
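        # NOTE (assumption): the default vocab is 512 colour clusters plus one
        # start-of-sequence token, and the default n_positions covers a 32x32 pixel grid.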
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : Union[str, Any] = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[Any] = n_head
UpperCAmelCase_ : Union[str, Any] = n_inner
UpperCAmelCase_ : List[Any] = activation_function
UpperCAmelCase_ : List[str] = resid_pdrop
UpperCAmelCase_ : str = embd_pdrop
UpperCAmelCase_ : Optional[Any] = attn_pdrop
UpperCAmelCase_ : Dict = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Dict = scale_attn_weights
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ : Tuple = reorder_and_upcast_attn
UpperCAmelCase_ : int = tie_word_embeddings
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
class __a( _a ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs | 30 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 13 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 2 ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 128 ,_SCREAMING_SNAKE_CASE=[16, 32, 64, 128] ,_SCREAMING_SNAKE_CASE = 7 ,_SCREAMING_SNAKE_CASE = 4 ,_SCREAMING_SNAKE_CASE = 37 ,_SCREAMING_SNAKE_CASE = "gelu" ,_SCREAMING_SNAKE_CASE = 0.1 ,_SCREAMING_SNAKE_CASE = 0.1 ,_SCREAMING_SNAKE_CASE = 10 ,_SCREAMING_SNAKE_CASE = 0.02 ,_SCREAMING_SNAKE_CASE = 2 ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = 128 ,_SCREAMING_SNAKE_CASE = [2, 2, 2, 2] ,_SCREAMING_SNAKE_CASE = 2 ,_SCREAMING_SNAKE_CASE = 2 ,) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : List[str] = is_training
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[Any] = encoder_stride
UpperCAmelCase_ : Optional[int] = num_attention_outputs
UpperCAmelCase_ : Optional[Any] = embed_dim
UpperCAmelCase_ : Union[str, Any] = embed_dim + 1
UpperCAmelCase_ : Union[str, Any] = resolution
UpperCAmelCase_ : List[str] = depths
UpperCAmelCase_ : Optional[int] = hidden_sizes
UpperCAmelCase_ : int = dim
UpperCAmelCase_ : Optional[int] = mlp_expansion_ratio
def a__ ( self ) -> str:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Any:
return EfficientFormerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,resolution=self.resolution ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,dim=self.dim ,mlp_expansion_ratio=self.mlp_expansion_ratio ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ : Optional[int] = TFEfficientFormerModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ,training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.type_sequence_label_size
UpperCAmelCase_ : Dict = TFEfficientFormerForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = model(_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : str = TFEfficientFormerForImageClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Dict = model(_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> str:
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Any = config_and_inputs
UpperCAmelCase_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = TFEfficientFormerModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(
self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
def a__ ( self ) -> str:
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
def a__ ( self ) -> Union[str, Any]:
pass
def a__ ( self ) -> Any:
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
UpperCAmelCase_ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase_ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
def check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
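            # Run the model with hidden-state outputs enabled and verify both the
            # number of returned states and their trailing shape.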
UpperCAmelCase_ : str = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,training=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : List[str] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
if hasattr(self.model_tester ,'''encoder_seq_length''' ):
UpperCAmelCase_ : int = self.model_tester.encoder_seq_length
if hasattr(self.model_tester ,'''chunk_length''' ) and self.model_tester.chunk_length > 1:
UpperCAmelCase_ : Optional[Any] = seq_length * self.model_tester.chunk_length
else:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
if config.is_encoder_decoder:
UpperCAmelCase_ : Tuple = outputs.decoder_hidden_states
                self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(list, tuple) )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = getattr(self.model_tester ,'''seq_length''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = getattr(self.model_tester ,'''decoder_seq_length''' ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[decoder_seq_length, self.model_tester.hidden_size] ,)
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Tuple = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
UpperCAmelCase_ : Optional[int] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : List[Any] = getattr(self.model_tester ,'''seq_length''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = getattr(self.model_tester ,'''encoder_seq_length''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = getattr(self.model_tester ,'''key_length''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = getattr(self.model_tester ,'''chunk_length''' ,_SCREAMING_SNAKE_CASE )
if chunk_length is not None and hasattr(self.model_tester ,'''num_hashes''' ):
UpperCAmelCase_ : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,training=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,self.model_tester.num_attention_outputs )
            # check that output_attentions also works when set via the config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : int = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,training=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,)
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,)
def a__ ( self ) -> List[str]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCAmelCase_, UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCAmelCase_ : str = model_class(_SCREAMING_SNAKE_CASE )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCAmelCase_ : Any = {
key: tf.keras.Input(shape=val.shape[1:] ,dtype=val.dtype ,name=_SCREAMING_SNAKE_CASE )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a__ ( self ) -> Tuple:
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def a__ ( self ) -> int:
UpperCAmelCase_ : int = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
UpperCAmelCase_ : List[Any] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : str = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''tf''' )
# forward pass
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE ,training=_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase_ : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
@slow
def a__ ( self ) -> str:
UpperCAmelCase_ : int = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
UpperCAmelCase_ : str = self.default_image_processor
UpperCAmelCase_ : Optional[Any] = prepare_img()
UpperCAmelCase_ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''tf''' )
# forward pass
UpperCAmelCase_ : Any = model(**_SCREAMING_SNAKE_CASE ,training=_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase_ : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
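# Parameters whose names end with one of the following are replicated across
# tensor-parallel (TP) ranks and are averaged when merging; the second list marks
# row-parallel weights, which are concatenated along dim 1 instead of dim 0.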
__a = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__a = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping( key , file ):
    '''Translates a Megatron-DeepSpeed parameter name to its transformers equivalent.'''
    layer_rename_map = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
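    # Megatron numbers the embedding/norm checkpoint files before the transformer
    # blocks, so subtracting 3 maps file `layer_03` onto transformers block `h.0`.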
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , file )[1] )
    layer_number -= 3
return f'''h.{layer_number}.''' + key
def get_dtype_size( dtype ):
    '''Returns the size in bytes of a single element of the given torch dtype.'''
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    '''Converts a Megatron-DeepSpeed BLOOM checkpoint to the transformers format, optionally sharding the output.'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
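    # Sharded output: each Megatron layer file is merged across TP ranks and written
    # out as its own pytorch_model_XXXXX-of-XXXXX.bin shard plus a weight-map index.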
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
                    if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for i, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
                # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                    # We concatenate these weights across TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
            if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
        help='An optional setting to shard the output model. \nThis enables sharding the converted checkpoint.',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
        help='Pretraining tensor-parallel (TP) degree that was used when training the model in Megatron-LM.',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 1 |
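# A perfect number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3.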
def perfect( number ):
    '''Checks whether `number` equals the sum of its proper divisors, i.e. is a perfect number.'''
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
__a = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 30 |
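# Project Euler problem 48: the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000.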
def solution():
    '''Returns the last ten digits of the series 1**1 + 2**2 + 3**3 + ... + 1000**1000.'''
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution()) | 30 | 1 |
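# Greedy activity selection: with finish times sorted ascending, repeatedly pick
# the next activity whose start time is at or after the finish time of the last one selected.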
def print_max_activities( start , finish ):
    '''Prints a maximum-size set of mutually compatible activities, assuming `finish` is sorted ascending.'''
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = [1, 3, 0, 5, 8, 5]
__a = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 30 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__a = None
__a = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__a = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
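# The `Image` feature stores each example as a {"bytes", "path"} struct in Arrow
# and lazily decodes it back to a PIL image; the helpers below convert in both directions.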
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
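# Module-level helpers: probe which formats Pillow can round-trip, and encode
# PIL images / numpy arrays into the {"bytes", "path"} representation above.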
def lowerCamelCase__ ( ):
    '''Lists the image formats that Pillow can both open and save.'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( _lowercase ):
    '''Serializes a PIL image to bytes, keeping the original format when Pillow can re-save it.'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : int = image.format
else:
UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_lowercase , format=_lowercase )
return buffer.getvalue()
def lowerCamelCase__ ( _lowercase ):
    '''Encodes a PIL image as a {"path", "bytes"} dict, reusing the on-disk file when one exists.'''
if hasattr(_lowercase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
    '''Encodes a numpy array as image bytes, downcasting to a Pillow-compatible dtype when needed.'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ : Tuple = array.dtype
UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ : Dict = dtype.kind
UpperCAmelCase_ : Union[str, Any] = dtype.itemsize
UpperCAmelCase_ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : Tuple = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase )
UpperCAmelCase_ : str = np.dtype(_lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) )
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
    '''Encodes a list of image-like objects (paths, numpy arrays or PIL images) for Arrow storage.'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase )
if isinstance(_lowercase , _lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowercase , np.ndarray ):
UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
elif isinstance(_lowercase , PIL.Image.Image ):
UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
else:
return objs
else:
return objs | 30 | 1 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
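# A tiny three-layer module whose state dict gets offloaded to disk and reloaded
# by the tests below.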
class __a( nn.Module ):
"""simple docstring"""
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 ,4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 ,5 )
    def forward( self ,_SCREAMING_SNAKE_CASE ):
        return self.linear2(self.batchnorm(self.linear1(_SCREAMING_SNAKE_CASE ) ) )
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : int = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE ,model.state_dict() )
UpperCAmelCase_ : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE ,'''index.json''' )
self.assertTrue(os.path.isfile(_SCREAMING_SNAKE_CASE ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCAmelCase_ : Dict = os.path.join(_SCREAMING_SNAKE_CASE ,f'''{key}.dat''' )
self.assertTrue(os.path.isfile(_SCREAMING_SNAKE_CASE ) )
# TODO: add tests on the fact weights are properly loaded
def a__ ( self ) -> int:
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            UpperCAmelCase_ : int = torch.randn(2 ,3 ,dtype=dtype )
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = offload_weight(_SCREAMING_SNAKE_CASE ,'''weight''' ,_SCREAMING_SNAKE_CASE ,{} )
UpperCAmelCase_ : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''weight.dat''' )
self.assertTrue(os.path.isfile(_SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{'''weight''': {'''shape''': [2, 3], '''dtype''': str(_SCREAMING_SNAKE_CASE ).split('''.''' )[1]}} )
UpperCAmelCase_ : Tuple = load_offloaded_weight(_SCREAMING_SNAKE_CASE ,index['''weight'''] )
self.assertTrue(torch.equal(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : int = ModelForTest()
UpperCAmelCase_ : int = model.state_dict()
UpperCAmelCase_ : int = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
UpperCAmelCase_ : str = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = OffloadedWeightsLoader(state_dict=_SCREAMING_SNAKE_CASE ,save_folder=_SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(_SCREAMING_SNAKE_CASE ) ,sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,weight_map[key] ) )
UpperCAmelCase_ : List[Any] = {k: v for k, v in state_dict.items() if '''weight''' in k}
UpperCAmelCase_ : Tuple = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = OffloadedWeightsLoader(state_dict=_SCREAMING_SNAKE_CASE ,save_folder=_SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(_SCREAMING_SNAKE_CASE ) ,sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Duplicates are removed
UpperCAmelCase_ : Optional[int] = OffloadedWeightsLoader(state_dict=_SCREAMING_SNAKE_CASE ,save_folder=_SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(_SCREAMING_SNAKE_CASE ) ,sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,weight_map[key] ) )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
UpperCAmelCase_ : List[str] = extract_submodules_state_dict(_SCREAMING_SNAKE_CASE ,['''a.1''', '''a.2'''] )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{'''a.1''': 0, '''a.2''': 2} )
UpperCAmelCase_ : Dict = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
UpperCAmelCase_ : Optional[Any] = extract_submodules_state_dict(_SCREAMING_SNAKE_CASE ,['''a.1''', '''a.2'''] )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{'''a.1.a''': 0, '''a.2.a''': 2} ) | 30 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
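# Integration test: run the DiT document classifier (16 RVL-CDIP classes) on one
# sample image and compare the logits against reference values.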
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
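# Declare the import structure for the lazy module: each optional backend
# (sentencepiece, tokenizers, torch, TensorFlow) is only imported when available.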
__a = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' ,FutureWarning ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
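# "Packing" concatenates short (source, target) pairs into longer examples so that
# each training example fills more of the model's context window.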
def pack_examples( tok , src_examples , tgt_examples , max_tokens=1024 ):
    '''Greedily merges consecutive (source, target) pairs while both sides stay under `max_tokens` when tokenized.'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # can't fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else: # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir( tok , data_dir , max_tokens , save_path ):
    '''Packs the train split of a seq2seq data directory into fewer, longer examples; val/test are copied unchanged.'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f'''packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.''' )
        Path(save_path / f'''{split}.source''' ).open('''w''' ).write('''\n'''.join(packed_src ) )
        Path(save_path / f'''{split}.target''' ).open('''w''' ).write('''\n'''.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        shutil.copyfile(src_path , save_path / f'''{split}.source''' )
        shutil.copyfile(tgt_path , save_path / f'''{split}.target''' )
def packer_cli():
    '''Parses command-line arguments and runs the packing.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' , type=str , help='''like facebook/bart-large-cnn, t5-base, etc.''' )
    parser.add_argument('''--max_seq_len''' , type=int , default=128 )
    parser.add_argument('''--data_dir''' , type=str )
    parser.add_argument('''--save_path''' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli() | 30 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
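# The tester below fabricates image batches and the expected processor config; the
# test class then checks resize/crop/normalize behaviour for PIL, numpy and torch inputs.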
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : List[str] = crop_pct
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Any = do_normalize
UpperCAmelCase_ : str = image_mean
UpperCAmelCase_ : List[Any] = image_std
def a__ ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 30 | 1 |
__a = 8.314462 # Unit - J mol-1 K-1
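# Ideal gas law, PV = nRT, solved for pressure and for volume respectively.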
def pressure_of_gas_system( moles , kelvin , volume ):
    '''Returns the pressure of an ideal gas system: P = nRT / V.'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles , kelvin , pressure ):
    '''Returns the volume of an ideal gas system: V = nRT / P.'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod() | 30 |
import unittest
import numpy as np
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ):
    '''Computes the Schur complement C - B^T A^-1 B of the block matrix [[A, B], [B^T, C]].'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b
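# For M = [[A, B], [B^T, C]], det(M) = det(A) * det(S) with S the Schur complement
# of A; the first test below verifies this identity numerically.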
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] )
UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s )
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> None:
UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
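# Exercises the consistency-model stochastic iterative scheduler: output shapes,
# custom timestep lists, full denoising loops, and timestep validation errors.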
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (CMStochasticIterativeScheduler,)
lowerCAmelCase = 10
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def a__ ( self ) -> str:
UpperCAmelCase_ : str = 10
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = self.scheduler_classes[0](**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = scheduler.timesteps[0]
UpperCAmelCase_ : Optional[int] = scheduler.timesteps[1]
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Any = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def a__ ( self ) -> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : int = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = 1
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler.timesteps
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_SCREAMING_SNAKE_CASE ):
# 1. scale model input
UpperCAmelCase_ : Union[str, Any] = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# 2. predict noise residual
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : List[Any] = pred_prev_sample
UpperCAmelCase_ : Dict = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1e-2
assert abs(result_mean.item() - 0.25_10 ) < 1e-3
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = scheduler.timesteps
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = self.dummy_model()
UpperCAmelCase_ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCAmelCase_ : Dict = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# 2. predict noise residual
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = pred_prev_sample
UpperCAmelCase_ : Optional[int] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1e-2
assert abs(result_mean.item() - 0.45_27 ) < 1e-3
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Any = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = [39, 30, 12, 15, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE ,msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [39, 30, 12, 1, 0]
UpperCAmelCase_ : List[Any] = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE ,msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE ,timesteps=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Any = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Any = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _SCREAMING_SNAKE_CASE ,msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' ,):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) | 30 |
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
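# Pure-Python Base64: every 3 input bytes (24 bits) become 4 six-bit indices into
# the alphabet above, with '=' padding when the input length is not a multiple of 3.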
def base64_encode( data ):
    '''Encodes a bytes-like object to Base64, mirroring `base64.b64encode`.'''
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    # Turn every input byte into its zero-padded 8-bit binary representation
    binary_stream = ''''''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'''=''' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b''''''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode( encoded_data ):
    '''Decodes Base64 input (bytes or ASCII string) back to the original bytes, mirroring `base64.b64decode`.'''
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            '''argument should be a bytes-like object or ASCII string, '''
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode('''utf-8''' )
        except UnicodeDecodeError:
            raise ValueError('''base64 encoded data should only contain ASCII characters''' )
    padding = encoded_data.count('''=''' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        # Each character contributes 6 bits; drop the bits that were only padding
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    # Regroup the bit stream into 8-bit bytes
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
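# Configuration defaults correspond to the facebook/xglm-564M checkpoint listed
# above; `attribute_map` translates generic transformer attribute names onto the
# XGLM-specific ones.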
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''xglm'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=256_008 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=2 ,**_SCREAMING_SNAKE_CASE ,) -> Any:
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : Dict = d_model
UpperCAmelCase_ : Union[str, Any] = ffn_dim
UpperCAmelCase_ : Dict = num_layers
UpperCAmelCase_ : Dict = attention_heads
UpperCAmelCase_ : Any = activation_function
UpperCAmelCase_ : Optional[int] = dropout
UpperCAmelCase_ : Tuple = attention_dropout
UpperCAmelCase_ : str = activation_dropout
UpperCAmelCase_ : Union[str, Any] = layerdrop
UpperCAmelCase_ : Tuple = init_std
UpperCAmelCase_ : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase_ : str = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,decoder_start_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) | 30 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
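# End-to-end checks for AutoTokenizer resolution: hub identifiers, local files,
# explicit tokenizer_type, and the slow/fast tokenizer class mapping.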
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We pass through a BERT tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
        self.assertEqual(counter.other_request_count ,0 )
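The registration tests above exercise the public `AutoConfig.register` / `AutoTokenizer.register` API. A minimal sketch of that flow outside a test harness follows; `CustomConfig` and `CustomTokenizer` are illustrative stand-ins for the test fixtures, not library classes.

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class CustomConfig(PretrainedConfig):
    model_type = "custom"

class CustomTokenizer(PreTrainedTokenizer):
    pass

# Map the new model type to its config, then map the config class to its tokenizer.
AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# From here on, AutoTokenizer resolves CustomConfig checkpoints to CustomTokenizer;
# re-registering a type that already exists raises an error, as the tests check.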
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''http://www.cs.umd.edu/~snover/tercom/''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] ,reference_urls=[
'''https://github.com/jhclark/tercom''',
] ,)
    def a__ ( self ,predictions ,references ,normalized = False ,ignore_punct = False ,support_zh_ja_chars = False ,case_sensitive = False ,) -> Optional[Any]:
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized ,no_punct=ignore_punct ,asian_support=support_zh_ja_chars ,case_sensitive=case_sensitive ,)
        output = sb_ter.corpus_score(predictions ,transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution ( n = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
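Registering a `_LazyModule` in `sys.modules` defers the backend imports above until an attribute is first accessed. A simplified sketch of the idea (the real implementation lives in `transformers.utils`; this is illustrative, not the library's code):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, package, import_structure):
        super().__init__(name)
        self._package = package
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is first requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self._package)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")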
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi ( precision ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
    print(F"""The first {n} digits of pi is: {pi(n)}""")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad ( x , y , z , scale , distance ):
    '''simple docstring'''
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        error_message = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(error_message )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate ( x , y , z , axis , angle ):
    '''simple docstring'''
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        error_message = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(error_message )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''instructblip_vision_model'''
    def __init__( self ,hidden_size=1_408 ,intermediate_size=6_144 ,num_hidden_layers=39 ,num_attention_heads=16 ,image_size=224 ,patch_size=14 ,hidden_act="gelu" ,layer_norm_eps=1e-6 ,attention_dropout=0.0 ,initializer_range=1e-10 ,qkv_bias=True ,**kwargs ,) -> Any:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def a__ ( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''' ) == "instructblip":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''instructblip_qformer'''
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,cross_attention_frequency=2 ,encoder_hidden_size=1_408 ,**kwargs ,) -> int:
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def a__ ( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''' ) == "instructblip":
            config_dict = config_dict['''qformer_config''']
        if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class InstructBlipConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''instructblip'''
    is_composition = True
    def __init__( self ,vision_config=None ,qformer_config=None ,text_config=None ,num_query_tokens=32 ,**kwargs ) -> List[str]:
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def a__ ( cls ,vision_config ,qformer_config ,text_config ,**kwargs ,) -> Optional[Any]:
        return cls(
            vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**kwargs ,)
    def a__ ( self ) -> str:
        output = copy.deepcopy(self.__dict__ )
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''qformer_config'''] = self.qformer_config.to_dict()
        output['''text_config'''] = self.text_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
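A brief usage sketch of the composite config. The import path and the classmethod name `from_vision_qformer_text_configs` are the upstream Transformers identifiers for what this file defines, assumed here for illustration:

from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

# Compose the config from default sub-configs; an OPT text config selects the
# decoder-only language-model path (see use_decoder_only_language_model above).
config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVisionConfig(),
    qformer_config=InstructBlipQFormerConfig(),
    text_config=OPTConfig(),
)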
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__a = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
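A minimal usage sketch of the public API re-exported by this `__init__` (the dataset name is illustrative):

from datasets import load_dataset

# Downloads and caches the dataset on first use, then reads from the local cache.
dataset = load_dataset("glue", "mrpc", split="train")
print(dataset[0])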