| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 87–55.2k) | int64 (0–349) | string (length 135–49.1k) | int64 (0–349) | int64 (0–1) |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return the value of the Möbius function μ(number)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
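# Editor's sketch (not part of the module above): the same Möbius logic without
# the repository's `maths` helpers, using a hypothetical trial-division
# factorizer so the example is self-contained and runnable:
def _prime_factors(n: int) -> list[int]:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def mobius_standalone(n: int) -> int:
    factors = _prime_factors(n)
    if len(set(factors)) == len(factors):  # square-free
        return -1 if len(factors) % 2 else 1
    return 0

assert [mobius_standalone(n) for n in (1, 2, 4, 6, 30)] == [1, -1, 0, 1, -1]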
| 219 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer based on SentencePiece (adapted from CamembertTokenizer and BartTokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
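# Editor's sketch (not part of the tokenizer): the three helpers above produce
# the CamemBERT/BART special-token layout. A model-free illustration with
# made-up token ids:
cls_id, sep_id = 0, 2  # <s> and </s>, per fairseq_tokens_to_ids above

def with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]  # <s> A </s>
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]  # <s> A </s></s> B </s>

assert with_special_tokens([7, 8]) == [0, 7, 8, 2]
assert with_special_tokens([7], [9]) == [0, 7, 2, 2, 9, 2]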
| 23 | 0 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
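# Example calls (exactly one argument must be 0; the zero one is solved for):
print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}
print(ohms_law(voltage=0, current=2, resistance=5))   # {'voltage': 10.0}
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}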
| 49 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
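# Hypothetical usage (not in the original file): two Flax models compare equal
# iff every flattened parameter leaf differs by less than the 1e-4 tolerance:
#   model_a = FlaxBertModel(config)
#   model_b = FlaxBertModel.from_pretrained(saved_dir)
#   assert check_models_equal(model_a, model_b)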
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 23 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels, reduced_channels):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
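# Editor's sketch (relies on the torch imports at the top of this file; the
# channel sizes are made up): squeeze-and-excitation computes one gate in
# [0, 1] per channel and rescales the feature map, leaving spatial
# dimensions untouched.
#   x = torch.randn(1, 8, 16, 16)
#   se = RegNetSELayer(in_channels=8, reduced_channels=2)
#   assert se(x).shape == x.shape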
class RegNetXLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
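# Editor's sketch of end-to-end usage (assumes internet access to download the
# facebook/regnet-y-040 checkpoint; follows the public transformers API):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"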
| 101 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A ( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict , __snake_case : Tuple , __snake_case : Optional[Any] ) -> List[str]:
UpperCAmelCase : int = MraModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
UpperCAmelCase : Optional[int] = model(__snake_case , token_type_ids=__snake_case )
UpperCAmelCase : Dict = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : Optional[Any] , ) -> Tuple:
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = MraModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , )
UpperCAmelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , encoder_hidden_states=__snake_case , )
UpperCAmelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : int ) -> Any:
UpperCAmelCase : Dict = MraForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Tuple , __snake_case : Tuple , __snake_case : Dict , __snake_case : Dict , __snake_case : Any , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Tuple ) -> Optional[int]:
UpperCAmelCase : List[str] = MraForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : str , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> int:
UpperCAmelCase : int = self.num_labels
UpperCAmelCase : Union[str, Any] = MraForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : List[str] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : str , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict ) -> int:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = MraForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : str , __snake_case : int , __snake_case : Any , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.num_choices
UpperCAmelCase : int = MraForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def A ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*__snake_case )
def A ( self : Tuple ) -> Dict:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def A ( self : int ) -> Dict:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def A ( self : Any ) -> Optional[int]:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@slow
def A ( self : Dict ) -> Any:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = MraModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip(reason='''MRA does not output attentions''' )
def A ( self : str ) -> Optional[Any]:
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
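# Editor's sketch: the three integration tests above share one pattern, shown
# here as a hypothetical helper (assumes the `torch` import at the top of the
# file): 1) deterministic input via torch.arange, 2) forward under no_grad,
# 3) compare the shape plus a small hard-coded logits slice with atol=1e-4.
def assert_matches_reference(output, expected_shape, expected_slice, atol=1e-4):
    assert output.shape == expected_shape
    assert torch.allclose(output[:, :3, :3], expected_slice, atol=atol)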
| 23 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, plus its line span."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
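# Hypothetical usage (mirrors the call in check_model_list_for_task below):
#   text, start, end, lines = _find_text_in_file(
#       filename=os.path.join(PATH_TO_TASK_GUIDES, "summarization.md"),
#       start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
#       end_prompt="<!--End of the generated tip-->",
#   )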
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as Markdown links."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check the auto-generated model list in a task guide and optionally rewrite it."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
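# Typical invocations (run from the repo root, per the header comment above):
#   python utils/check_task_guides.py                      # error out if any list is stale
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite stale lists in place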
| 343 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
        expected_dict = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))
@require_torch
def A ( self : str ) -> Union[str, Any]:
UpperCAmelCase : Any = np.random.randn(3 , 4 )
UpperCAmelCase : List[Any] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) , transpose(__snake_case ).numpy() ) )
UpperCAmelCase : Tuple = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Any = torch.tensor(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , transpose(__snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase : int = np.random.randn(3 , 4 )
UpperCAmelCase : Optional[int] = tf.constant(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) , transpose(__snake_case ).numpy() ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : str = tf.constant(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , transpose(__snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self : Tuple ) -> Any:
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = jnp.array(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) , np.asarray(transpose(__snake_case ) ) ) )
UpperCAmelCase : Dict = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : int = jnp.array(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(__snake_case , axes=(1, 2, 0) ) ) ) )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , np.reshape(__snake_case , (4, 3) ) ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , np.reshape(__snake_case , (12, 5) ) ) )
@require_torch
def A ( self : Union[str, Any] ) -> int:
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
UpperCAmelCase : Optional[Any] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , reshape(__snake_case , (4, 3) ).numpy() ) )
UpperCAmelCase : List[str] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[Any] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , reshape(__snake_case , (12, 5) ).numpy() ) )
@require_tf
def A ( self : int ) -> List[str]:
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = tf.constant(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , reshape(__snake_case , (4, 3) ).numpy() ) )
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Optional[Any] = tf.constant(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , reshape(__snake_case , (12, 5) ).numpy() ) )
@require_flax
def A ( self : Any ) -> Dict:
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = jnp.array(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , np.asarray(reshape(__snake_case , (4, 3) ) ) ) )
UpperCAmelCase : Any = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Optional[Any] = jnp.array(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , np.asarray(reshape(__snake_case , (12, 5) ) ) ) )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__snake_case ) , np.squeeze(__snake_case ) ) )
UpperCAmelCase : str = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , np.squeeze(__snake_case , axis=2 ) ) )
@require_torch
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : List[str] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) , squeeze(__snake_case ).numpy() ) )
UpperCAmelCase : Any = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : str = torch.tensor(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , squeeze(__snake_case , axis=2 ).numpy() ) )
@require_tf
def A ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : int = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Optional[int] = tf.constant(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) , squeeze(__snake_case ).numpy() ) )
UpperCAmelCase : List[str] = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : Optional[int] = tf.constant(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , squeeze(__snake_case , axis=2 ).numpy() ) )
@require_flax
def A ( self : List[Any] ) -> Dict:
UpperCAmelCase : Optional[int] = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : int = jnp.array(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) , np.asarray(squeeze(__snake_case ) ) ) )
UpperCAmelCase : str = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : int = jnp.array(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , np.asarray(squeeze(__snake_case , axis=2 ) ) ) )
def A ( self : Optional[Any] ) -> int:
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , np.expand_dims(__snake_case , axis=1 ) ) )
@require_torch
def A ( self : List[str] ) -> Tuple:
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : Tuple = torch.tensor(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , expand_dims(__snake_case , axis=1 ).numpy() ) )
@require_tf
def A ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 )
UpperCAmelCase : Any = tf.constant(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , expand_dims(__snake_case , axis=1 ).numpy() ) )
@require_flax
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : List[str] = np.random.randn(3 , 4 )
UpperCAmelCase : str = jnp.array(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , np.asarray(expand_dims(__snake_case , axis=1 ) ) ) )
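# Editor's sketch (not part of the test file): the transpose/reshape/squeeze
# helpers exercised above dispatch on the tensor's framework. A minimal
# numpy-first illustration of that dispatch idea, with a hypothetical name:
def transpose_generic(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    if hasattr(array, "permute"):  # torch-like tensors
        return array.permute(*axes) if axes is not None else array.T
    raise ValueError(f"Type not supported for transpose: {type(array)}.")

assert transpose_generic(np.ones((3, 4))).shape == (4, 3)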
| 23 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image = None , strength = 0.8 , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ):
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
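# Usage sketch (hedged; assumes a `unet` and `scheduler` were built elsewhere
# and the image path is hypothetical):
#
#   pipe = UpperCAmelCase_(unet, scheduler)
#   init_image = PIL.Image.open("sample.png")
#   output = pipe(init_image, strength=0.75, num_inference_steps=50)
#   output.images[0].save("variation.png")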
| 35 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file( fname , version , pattern ):
    """Update the version in `fname` using the regex/replacement for `pattern`."""
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples( version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update( version , patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
def post_release_work():
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
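# Usage sketch (hedged; the file name `release.py` is an assumption):
#
#   python release.py                  # prepare the next minor release
#   python release.py --patch          # prepare a patch release
#   python release.py --post_release   # bump back to a .dev0 dev version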
| 23 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_A = logging.get_logger(__name__)
class lowercase_ ( A__ ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
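# Migration sketch (hedged; a minimal example of the recommended replacement,
# the checkpoint name is one published PoolFormer checkpoint):
#
#   from transformers import PoolFormerImageProcessor
#   image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")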
| 122 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors : list[numpy.ndarray] , steps : int ) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step( vectors : list[numpy.ndarray] ) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate( vector : numpy.ndarray , angle_in_degrees : float ) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot( vectors : list[numpy.ndarray] ) -> None:
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
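    # Sanity check for rotate(): a 90-degree rotation maps the x unit vector
    # onto the y unit vector (up to floating-point error).
    assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))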
| 23 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class a_ ( A__ ):
"""simple docstring"""
    def __init__( self , path="" , prefix="train" ):
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            story_path = os.path.join(path , story_filename )
            if not os.path.isfile(story_path ):
                continue
            self.documents.append(story_path )
def __len__( self : Dict ):
return len(self.documents )
    def __getitem__( self , idx ):
        document_path = self.documents[idx]
        document_name = document_path.split('/' )[-1]
        with open(document_path , encoding='utf-8' ) as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story( raw_story ):
    """simple docstring"""
    nonempty_lines = list(filter(lambda x: len(x ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # elements until the deque is empty, which raises IndexError.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith('@highlight' ) , lines ) )
    return story_lines, summary_lines
def _add_missing_period( line ):
    """simple docstring"""
    END_TOKENS = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u201d''', ''')''']
    if line.startswith('@highlight' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size( sequence , block_size , pad_token_id ):
    """simple docstring"""
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask( sequence , pad_token_id ):
    """simple docstring"""
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization( story_lines , summary_lines , tokenizer ):
    """simple docstring"""
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids( batch , separator_token_id ):
    """simple docstring"""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
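# Worked example for process_story (hedged; the raw text is illustrative):
#
#   raw = "First sentence.\nSecond sentence\n@highlight\nA short summary"
#   story, summary = process_story(raw)
#   # story   -> ["First sentence.", "Second sentence."]   (period added)
#   # summary -> ["A short summary."]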
| 334 |
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase : Tuple = [mem.copy() for i in range(6 )]
UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase : Dict = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Union[str, Any] = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Optional[Any] = Text('''CPU''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''GPU''' , font_size=24 )
UpperCAmelCase : Dict = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''Model''' , font_size=24 )
UpperCAmelCase : Tuple = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
UpperCAmelCase : Any = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
cpu_targs.append(__snake_case )
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : int = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = Text('''Loaded Checkpoint''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
UpperCAmelCase : Tuple = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase : List[Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) , Write(__snake_case ) )
self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
UpperCAmelCase : Tuple = []
UpperCAmelCase : int = []
for i, rect in enumerate(__snake_case ):
UpperCAmelCase : Any = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
UpperCAmelCase : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
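# Rendering sketch (hedged; the module file name is an assumption):
#
#   manim -pql checkpoint_scene.py SCREAMING_SNAKE_CASE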
| 23 | 0 |
import random
def _partition( data : list , pivot ):
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select( items : list , index : int ):
    """simple docstring"""
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    less, equal, greater = _partition(items , pivot )
    count = len(equal )
    m = len(less )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less , index )
    # must be in larger
    else:
        return quick_select(greater , index - (m + count) )
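# Usage sketch: quick_select returns the element that would sit at `index`
# (0-based) in sorted order, or None for an out-of-range index.
#
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)   # -> 54
#   quick_select([2, 4, 5, 7, 899, 54, 32], 10)  # -> None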
| 218 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
UpperCamelCase__: str = None
UpperCamelCase__: int = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
UpperCamelCase__: List[Any] = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size( n , ffn_dim_multiplier=1 , multiple_of=256 ):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def read_json( path ):
    with open(path , '''r''' ) as f:
        return json.load(f )
def write_json( text , path ):
    with open(path , '''w''' ) as f:
        json.dump(text , f )
def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any=True ) -> List[Any]:
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase : int = os.path.join(_lowerCAmelCase , '''tmp''' )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase : List[str] = read_json(os.path.join(_lowerCAmelCase , '''params.json''' ) )
UpperCAmelCase : str = NUM_SHARDS[model_size]
UpperCAmelCase : Any = params['''n_layers''']
UpperCAmelCase : str = params['''n_heads''']
UpperCAmelCase : Any = n_heads // num_shards
UpperCAmelCase : List[str] = params['''dim''']
UpperCAmelCase : Optional[Any] = dim // n_heads
UpperCAmelCase : str = 1_0_0_0_0.0
UpperCAmelCase : Optional[int] = 1.0 / (base ** (torch.arange(0 , _lowerCAmelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
UpperCAmelCase : Tuple = params['''n_kv_heads'''] # for GQA / MQA
UpperCAmelCase : Optional[int] = n_heads_per_shard // num_key_value_heads
UpperCAmelCase : Optional[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCAmelCase : List[str] = n_heads
UpperCAmelCase : Optional[int] = n_heads_per_shard
UpperCAmelCase : List[str] = dim
# permute for sliced rotary
def permute(_lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any]=n_heads , _lowerCAmelCase : int=dim , _lowerCAmelCase : Dict=dim ):
return w.view(_lowerCAmelCase , dima // n_heads // 2 , 2 , _lowerCAmelCase ).transpose(1 , 2 ).reshape(_lowerCAmelCase , _lowerCAmelCase )
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase : int = torch.load(os.path.join(_lowerCAmelCase , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
UpperCAmelCase : Optional[Any] = [
torch.load(os.path.join(_lowerCAmelCase , f"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' )
for i in range(_lowerCAmelCase )
]
UpperCAmelCase : Any = 0
UpperCAmelCase : str = {'''weight_map''': {}}
for layer_i in range(_lowerCAmelCase ):
UpperCAmelCase : Optional[Any] = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase : Optional[int] = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase : List[str] = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
UpperCAmelCase : Union[str, Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase : Optional[Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
UpperCAmelCase : str = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(_lowerCAmelCase )] , dim=1 )
UpperCAmelCase : Any = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(_lowerCAmelCase )] , dim=0 )
UpperCAmelCase : str = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(_lowerCAmelCase )] , dim=1 )
UpperCAmelCase : Tuple = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(_lowerCAmelCase )] , dim=0 )
UpperCAmelCase : Any = inv_freq
for k, v in state_dict.items():
UpperCAmelCase : List[Any] = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase : Optional[int] = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase : str = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
UpperCAmelCase : Any = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(_lowerCAmelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(_lowerCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase : Optional[int] = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
# Write configs
UpperCAmelCase : Union[str, Any] = {'''total_size''': param_count * 2}
write_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , '''pytorch_model.bin.index.json''' ) )
UpperCAmelCase : int = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
UpperCAmelCase : Tuple = params['''multiple_of'''] if '''multiple_of''' in params else 256
UpperCAmelCase : Any = LlamaConfig(
hidden_size=_lowerCAmelCase , intermediate_size=compute_intermediate_size(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=_lowerCAmelCase , )
config.save_pretrained(_lowerCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=_lowerCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(_lowerCAmelCase , safe_serialization=_lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
def write_tokenizer( tokenizer_path , input_tokenizer_path ):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
    parser.add_argument(
        '''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
    parser.add_argument(
        '''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
    parser.add_argument('''--safe_serialization''' , type=bool , help='''Whether or not to save using `safetensors`.''' )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , '''tokenizer.model''' )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
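# Invocation sketch (hedged; the script name and paths are assumptions):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/llama --model_size 7B --output_dir /path/to/hf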
| 23 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    '''simple docstring'''
    if "model" in orig_key:
        orig_key = orig_key.replace("""model.""" , """""" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("""norm2""" , """output.LayerNorm""" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("""norm""" , """LayerNorm""" )
    if "transformer" in orig_key:
        layer_num = orig_key.split(""".""" )[0].split("""_""" )[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("""mha.attn""" , """attention.self""" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("""mha""" , """attention""" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("""W_q""" , """self.query""" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("""W_k""" , """self.key""" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("""W_v""" , """self.value""" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("""ff1""" , """intermediate.dense""" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("""ff2""" , """output.dense""" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("""ff""" , """output.dense""" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
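# Invocation sketch (hedged; the script and file names are assumptions):
#
#   python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json --pytorch_dump_path ./yoso-hf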
| 229 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    # Second-order low-pass biquad (RBJ Audio EQ Cookbook coefficients).
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    # Second-order high-pass biquad.
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    # Second-order band-pass biquad (constant peak gain).
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    # Second-order all-pass biquad (flat magnitude, phase shift only).
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    # Second-order peaking-EQ biquad: boosts/cuts `gain_db` around `frequency`.
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    # Low-shelf biquad: boosts/cuts everything below `frequency` by `gain_db`.
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    # High-shelf biquad: boosts/cuts everything above `frequency` by `gain_db`.
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
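# Usage sketch (hedged; `IIRFilter.process` filters one sample at a time):
#
#   filt = make_lowpass(1000, 48000)   # 1 kHz low-pass for 48 kHz audio
#   filtered = [filt.process(sample) for sample in samples]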
| 23 | 0 |
def lowerCAmelCase__(__snake_case ,__snake_case ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 209 |
'''simple docstring'''
from __future__ import annotations
def kmp( pattern : str , text : str ) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern : str ) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
UpperCamelCase__: str = "abc1abc12"
UpperCamelCase__: str = "alskfjaldsabc1abc1abc12k23adsfabcabc"
UpperCamelCase__: Any = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase__: Tuple = "ABABX"
UpperCamelCase__: Union[str, Any] = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
UpperCamelCase__: Any = "AAAB"
UpperCamelCase__: str = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
UpperCamelCase__: int = "abcdabcy"
UpperCamelCase__: Any = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
UpperCamelCase__: List[str] = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 23 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def lowercase ( self : Dict ):
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=__snake_case , )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def lowercase ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__snake_case )
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def lowercase ( self : Tuple ):
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=__snake_case , )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def lowercase ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : str ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__snake_case )
def _UpperCAmelCase ( ) -> Optional[Any]:
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def _UpperCAmelCase ( ) -> Union[str, Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCAmelCase__ ( A__ ):
@require_beam
def lowercase ( self : Tuple ):
_snake_case = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = DummyBeamDataset(cache_dir=__snake_case , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_snake_case = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __snake_case )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __snake_case )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase ( self : List[Any] ):
import apache_beam as beam
_snake_case = beam.io.parquetio.WriteToParquet
_snake_case = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = DummyBeamDataset(cache_dir=__snake_case , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
_snake_case = partial(__snake_case , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__snake_case , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        __snake_case , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_snake_case = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __snake_case )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __snake_case )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def lowercase ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = DummyBeamDataset(cache_dir=__snake_case )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase ( self : Optional[Any] ):
_snake_case = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_snake_case = NestedBeamDataset(cache_dir=__snake_case , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
_snake_case = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __snake_case )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __snake_case )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__snake_case , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 288 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__: int = logging.get_logger(__name__)
UpperCamelCase__: Dict = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCamelCase__: Optional[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def snake_case_ ( _lowerCAmelCase : str ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = {}
with open(_lowerCAmelCase , '''r''' ) as file:
for line_number, line in enumerate(_lowerCAmelCase ):
UpperCAmelCase : List[str] = line.strip()
if line:
UpperCAmelCase : str = line.split()
UpperCAmelCase : Union[str, Any] = line_number
UpperCAmelCase : List[Any] = words[0]
UpperCAmelCase : Union[str, Any] = value
return result
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ) -> int:
for attribute in key.split('''.''' ):
UpperCAmelCase : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Dict = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCAmelCase ):
UpperCAmelCase : Any = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : Dict = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : List[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : Optional[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : int = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase : Union[str, Any] = value[0]
else:
UpperCAmelCase : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase : int = value
elif weight_type == "weight_g":
UpperCAmelCase : str = value
elif weight_type == "weight_v":
UpperCAmelCase : Dict = value
elif weight_type == "bias":
UpperCAmelCase : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : int = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = value
else:
UpperCAmelCase : Tuple = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] ) -> List[Any]:
UpperCAmelCase : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCAmelCase ):
UpperCAmelCase : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : Any = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : Optional[int] = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : Optional[int] = '''.'''.join([key, hf_param_name] )
else:
UpperCAmelCase : List[Any] = key
UpperCAmelCase : Tuple = value if '''lm_head''' in full_key else value[0]
UpperCamelCase__: Tuple = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Optional[Any]=None ) -> int:
UpperCAmelCase : List[Any] = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase : Tuple = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
UpperCAmelCase : List[Any] = mapped_key.replace('''*''' , _lowerCAmelCase )
if "weight_g" in name:
UpperCAmelCase : str = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase : int = '''weight_v'''
elif "bias" in name:
UpperCAmelCase : int = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : List[str] = '''weight'''
else:
UpperCAmelCase : Dict = None
if hf_dict is not None:
rename_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return is_used
return is_used
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ) -> Any:
UpperCAmelCase : Dict = []
UpperCAmelCase : Dict = fairseq_model.state_dict()
UpperCAmelCase : Union[str, Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase : Any = True
else:
UpperCAmelCase : Optional[Any] = load_wavaveca_layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Any = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase : Optional[int] = name.split('''.''' )
UpperCAmelCase : Tuple = int(items[0] )
UpperCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[int]=False ) -> Dict:
if config_path is not None:
UpperCAmelCase : List[str] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
else:
UpperCAmelCase : List[Any] = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase : Optional[Any] = read_txt_into_dict(_lowerCAmelCase )
UpperCAmelCase : Optional[int] = idalabel
UpperCAmelCase : Optional[Any] = WavaVecaForSequenceClassification(_lowerCAmelCase )
UpperCAmelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
feature_extractor.save_pretrained(_lowerCAmelCase )
elif is_finetuned:
if dict_path:
UpperCAmelCase : Dict = Dictionary.load(_lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : Any = target_dict.pad_index
UpperCAmelCase : Tuple = target_dict.bos_index
UpperCAmelCase : Optional[int] = target_dict.eos_index
UpperCAmelCase : Union[str, Any] = len(target_dict.symbols )
UpperCAmelCase : Dict = os.path.join(_lowerCAmelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[str] = 1
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCAmelCase , )
UpperCAmelCase : int = True if config.feat_extract_norm == '''layer''' else False
UpperCAmelCase : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
UpperCAmelCase : str = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = WavaVecaForCTC(_lowerCAmelCase )
else:
UpperCAmelCase : Dict = WavaVecaForPreTraining(_lowerCAmelCase )
if is_finetuned or is_seq_class:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
UpperCAmelCase : Optional[Any] = argparse.Namespace(task='''audio_pretraining''' )
UpperCAmelCase : List[Any] = fairseq.tasks.setup_task(_lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCAmelCase )
UpperCAmelCase : Optional[int] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
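# A hedged usage sketch (script file name and paths are placeholders, not from the
# original): converting a fine-tuned fairseq checkpoint with the CLI defined above.
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small_960h.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h
#
# Passing --not_finetuned instead converts a pretraining checkpoint, and --is_seq_class
# treats --dict_path as a label file readable by read_txt_into_dict.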
| 23 | 0 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build `value` out of the largest available denominations."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as many times as it still fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
__lowerCamelCase : int = []
__lowerCamelCase : Optional[Any] = "0"
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
__lowerCamelCase : int = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
__lowerCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__lowerCamelCase : Any = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__lowerCamelCase : Any = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
__lowerCamelCase : Union[str, Any] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
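# A hand-computed sanity check of the greedy routine above (not from the original
# file): with the default Indian-currency denominations,
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Note the greedy strategy is only guaranteed optimal for canonical coin systems
# such as this one.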
| 219 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case , cache_dir=__snake_case )
UpperCAmelCase : str = [t[-1] for t in os.walk(os.path.join(__snake_case , os.listdir(__snake_case )[0] , '''snapshots''' ) )]
UpperCAmelCase : str = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : str = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case )
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Tuple = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[Any] = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(__snake_case , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__snake_case ) == num_samples
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : Any = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : int = num_samples * [prompt]
UpperCAmelCase : Union[str, Any] = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : int = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Tuple = shard(__snake_case )
UpperCAmelCase : Tuple = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def A ( self : int ) -> Dict:
UpperCAmelCase , UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : List[str] = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : List[Any] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : int ) -> Any:
UpperCAmelCase , UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
UpperCAmelCase : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[int] = jax.device_count()
UpperCAmelCase : List[str] = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : str = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : int = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__snake_case , safety_checker=__snake_case , )
UpperCAmelCase : Tuple = scheduler.create_state()
UpperCAmelCase : Dict = scheduler_state
UpperCAmelCase : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : int = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Any = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : str = replicate(__snake_case )
UpperCAmelCase : List[str] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Dict = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def A ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : str = jax.random.split(jax.random.PRNGKey(0 ) , __snake_case )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , )
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[str] = shard(__snake_case )
UpperCAmelCase : Any = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : Optional[int] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase , UpperCAmelCase : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , use_memory_efficient_attention=__snake_case , )
UpperCAmelCase : int = replicate(__snake_case )
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[Any] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : int = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
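# A minimal sketch (assuming a `pipeline`/`params` pair loaded as in the tests above)
# of the data-parallel pattern every test repeats: replicate the params across
# devices, give each device its own RNG, and shard the tokenized prompts along the
# leading (device) axis before the jitted call.
#
#   params = replicate(params)                                   # one param copy per device
#   prng_seed = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = pipeline.prepare_inputs(jax.device_count() * [prompt])
#   prompt_ids = shard(prompt_ids)                               # (devices, per_device_batch, ...)
#   images = pipeline(prompt_ids, params, prng_seed, 50, jit=True).images
#   # -> shape (devices, 1, H, W, 3), matching the assertions above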
| 23 | 0 |
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before it is imported below
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 49 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n: int, prec: int = 1000) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp), with d odd
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 23 | 0 |
def is_palindrome(num):
    '''Return True if the decimal digits of `num` read the same in reverse.'''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
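# Hand-checked examples for the digit-reversal test above:
#   is_palindrome(121) -> True, is_palindrome(123) -> False, is_palindrome(-121) -> False
# Reversing the digits arithmetically avoids the str(num) == str(num)[::-1] round trip.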
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''' )
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''' )
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''' )
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''' )
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''' )
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''' )
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('''out_conv''' , '''projection''' )
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''' , '''residual_layer1''' )
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''' , '''residual_layer2''' )
    if "conv1" in name:
        name = name.replace('''conv1''' , '''convolution1''' )
    if "conv2" in name:
        name = name.replace('''conv2''' , '''convolution2''' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
    if "pretrained" in name:
        name = name.replace('''pretrained''' , '''dpt''' )
    if "bn" in name:
        name = name.replace('''bn''' , '''batch_norm''' )
    if "head" in name:
        name = name.replace('''head''' , '''head.head''' )
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''' , '''layernorm''' )
    if "auxlayer" in name:
        name = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
    if "backbone" in name:
        name = name.replace('''backbone''' , '''backbone.bit.encoder''' )
    if ".." in name:
        name = name.replace('''..''' , '''.''' )
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('''convolution''' , '''conv''' )
    if "layer" in name and "backbone" in name:
        name = name.replace('''layer''' , '''layers''' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
    if "embedder.conv" in name:
        name = name.replace('''embedder.conv''' , '''embedder.convolution''' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
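# A hedged usage sketch (the script file name and local paths are placeholders):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url /path/to/dpt_hybrid-midas-501f0c75.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas \
#       --show_prediction
#
# Despite the --checkpoint_url name, convert_dpt_checkpoint() passes the value
# straight to torch.load() (the torch.hub download is commented out above), so a
# locally downloaded file path is what works with the code as written.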
| 23 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_SCREAMING_SNAKE_CASE = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_SCREAMING_SNAKE_CASE = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_SCREAMING_SNAKE_CASE = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
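# Usage, as spelled out in _KWARGS_DESCRIPTION above:
#   >>> metric = datasets.load_metric("competition_math")
#   >>> metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   {'accuracy': 1.0}
# Both sides are canonicalized by math_equivalence.is_equiv before comparison, which
# is why "1/2" matches "\\frac{1}{2}".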
| 343 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before it is imported below
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 23 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
lowercase = MODEL_FOR_MASKED_LM_MAPPING
lowercase = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCamelCase ( self : List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCamelCase ( self : List[str] ):
snake_case__ : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
snake_case__ : str = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-0_5, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-0_5, """token""": 25_506, """token_str""": """ accuser"""},
] , )
snake_case__ : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-0_5,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-0_5,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
snake_case__ : Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-0_5, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-0_5, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
snake_case__ : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-0_5, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS"""},
] , )
snake_case__ : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-0_5,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS"""},
] , )
snake_case__ : Tuple = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-0_5, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-0_5, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 13_606, """token_str""": """ Clara"""},
] , )
snake_case__ : int = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
[
{
"""score""": 2.2E-0_5,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-0_5,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-0_5, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : List[Any] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
snake_case__ : List[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_torch
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Tuple = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(__snake_case )
@slow
@require_tf
def lowerCamelCase ( self : Dict ):
snake_case__ : Tuple = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(__snake_case )
def lowerCamelCase ( self : Dict , snake_case_ : Any ):
snake_case__ : List[str] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__snake_case ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""},
] , )
snake_case__ : Optional[int] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__snake_case ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
snake_case__ : Tuple = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = None
self.run_pipeline_test(__snake_case , [] )
@require_tf
def lowerCamelCase ( self : Dict ):
snake_case__ : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
snake_case__ : Dict = None
snake_case__ : Union[str, Any] = None
self.run_pipeline_test(__snake_case , [] )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[int] ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
snake_case__ : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
snake_case__ : Optional[Any] = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def lowerCamelCase ( self : List[str] , snake_case_ : Any , snake_case_ : str ):
snake_case__ : int = fill_masker.tokenizer
snake_case__ : Tuple = fill_masker.model
snake_case__ : Optional[Any] = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
] , )
snake_case__ : int = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
] , )
snake_case__ : Union[str, Any] = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
__snake_case , [
[
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
],
[
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
],
] , )
with self.assertRaises(__snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__snake_case ):
fill_masker("""This is""" )
self.run_test_top_k(__snake_case , __snake_case )
self.run_test_targets(__snake_case , __snake_case )
self.run_test_top_k_targets(__snake_case , __snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(__snake_case , __snake_case )
self.fill_mask_with_multiple_masks(__snake_case , __snake_case )
def lowerCamelCase ( self : Any , snake_case_ : Any , snake_case_ : List[Any] ):
snake_case__ : Tuple = tokenizer.get_vocab()
snake_case__ : List[str] = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case__ : Any = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , targets=__snake_case )
snake_case__ : Dict = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
] , )
snake_case__ : Any = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __snake_case )
snake_case__ : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__snake_case ) )
# Call argument
snake_case__ : List[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
snake_case__ : Union[str, Any] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=__snake_case )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
] , )
snake_case__ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __snake_case )
snake_case__ : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__snake_case ) )
# Score equivalence
snake_case__ : int = fill_masker(f"This is a {tokenizer.mask_token}" , targets=__snake_case )
snake_case__ : int = [top_mask['''token_str'''] for top_mask in outputs]
snake_case__ : Optional[int] = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ) == set(__snake_case ):
snake_case__ : List[str] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=__snake_case )
snake_case__ : str = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
# Raises with invalid
with self.assertRaises(__snake_case ):
snake_case__ : Any = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__snake_case ):
snake_case__ : Union[str, Any] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""""""] )
with self.assertRaises(__snake_case ):
snake_case__ : Any = fill_masker(f"This is a {tokenizer.mask_token}" , targets="""""" )
def lowerCamelCase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : Optional[int] ):
snake_case__ : List[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , top_k=2 )
snake_case__ : Optional[int] = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
] , )
snake_case__ : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
snake_case__ : Tuple = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
__snake_case , [
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
] , )
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def lowerCamelCase ( self : Dict , snake_case_ : List[Any] , snake_case_ : Tuple ):
snake_case__ : List[str] = tokenizer.get_vocab()
snake_case__ : Optional[int] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
# top_k=2, ntargets=3
snake_case__ : Union[str, Any] = sorted(vocab.keys() )[:3]
snake_case__ : Any = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=__snake_case )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        snake_case__ : Union[str, Any] = [el['''token_str'''] for el in sorted(__snake_case , key=lambda x : x["score"] , reverse=__snake_case )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ).issubset(__snake_case ):
snake_case__ : Tuple = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=__snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def lowerCamelCase ( self : Tuple , snake_case_ : Dict , snake_case_ : Dict ):
snake_case__ : List[str] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
snake_case__ : str = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case__ : Optional[int] = sorted(vocab.keys() )[:3]
snake_case__ : Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case__ : List[Any] = fill_masker(f"My name is {tokenizer.mask_token}" , targets=__snake_case , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__snake_case ) , 3 )
def lowerCamelCase ( self : Tuple , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : List[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
snake_case__ : Optional[Any] = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
__snake_case , [
[
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
],
[
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
],
[
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
{"""sequence""": ANY(__snake_case ), """score""": ANY(__snake_case ), """token""": ANY(__snake_case ), """token_str""": ANY(__snake_case )},
],
] , )
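# A minimal sketch of the pipeline these tests exercise (model id taken from the
# tests above; requires a PyTorch or TensorFlow backend):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   unmasker("My name is <mask>")                                  # [{sequence, score, token, token_str}, ...]
#   unmasker("My name is <mask>", targets=[" Patrick", " Clara"])  # restrict the candidate tokens
#   unmasker("My name is <mask> <mask>")                           # one result list per mask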
| 35 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left : right + 1] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
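# The recursion above halves the range on each call, so the cost recurrence is
# T(n) = 2 T(n/2) + O(1), i.e. O(n) comparisons -- no asymptotic win over a linear
# scan, but a clean divide-and-conquer example. Hand-checked:
#   find_max([3, 1, 4, 1, 5, 9, 2], 0, 6) -> 9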
| 23 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_A = None
_A = logging.get_logger(__name__)
_A = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_A = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
_A = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
_A = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowercase_ ( A__ ):
A__ : List[str] = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = ["""input_ids""", """attention_mask"""]
A__ : Dict = MBartTokenizer
A__ : List[str] = []
A__ : int = []
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
vocab_file=__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCamelCase_ = vocab_file
UpperCamelCase_ = False if not self.vocab_file else True
UpperCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
UpperCamelCase_ = {
lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase_ = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCamelCase_ = src_lang
UpperCamelCase_ = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
UpperCamelCase_ = self.convert_tokens_to_ids(__snake_case )
UpperCamelCase_ = tgt_lang_id
return inputs
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = "en_XX" , __UpperCamelCase = None , __UpperCamelCase = "ro_RO" , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = src_lang
UpperCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.convert_tokens_to_ids(__snake_case )
UpperCamelCase_ = []
UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.convert_tokens_to_ids(__snake_case )
UpperCamelCase_ = []
UpperCamelCase_ = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
UpperCamelCase_ = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 122 |
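The class above is an MBart-style fast tokenizer: set_src_lang_special_tokens leaves the prefix empty and appends [eos, language code] as the suffix. A minimal usage sketch, assuming the transformers MBartTokenizerFast class and the facebook/mbart-large-en-ro checkpoint (both are assumptions about which concrete model this snippet was lifted from):

# Sketch only: swap in whichever MBart-style checkpoint you actually use.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
ids = tok("UN Chief says there is no military solution in Syria")["input_ids"]
# per set_src_lang_special_tokens above, the language code follows </s>
print(tok.convert_ids_to_tokens(ids)[-2:])  # expected: ['</s>', 'en_XX']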
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self : Union[str, Any] , __snake_case : UNetaDModel , __snake_case : ScoreSdeVeScheduler ) -> int:
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Optional[int] , __snake_case : int = 1 , __snake_case : int = 2000 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , **__snake_case : Optional[int] , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase : str = self.unet.config.sample_size
UpperCAmelCase : Union[str, Any] = (batch_size, 3, img_size, img_size)
UpperCAmelCase : int = self.unet
UpperCAmelCase : Any = randn_tensor(__snake_case , generator=__snake_case ) * self.scheduler.init_noise_sigma
UpperCAmelCase : List[Any] = sample.to(self.device )
self.scheduler.set_timesteps(__snake_case )
self.scheduler.set_sigmas(__snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase : Any = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCAmelCase : Union[str, Any] = self.unet(__snake_case , __snake_case ).sample
UpperCAmelCase : Optional[Any] = self.scheduler.step_correct(__snake_case , __snake_case , generator=__snake_case ).prev_sample
# prediction step
UpperCAmelCase : Optional[Any] = model(__snake_case , __snake_case ).sample
UpperCAmelCase : List[str] = self.scheduler.step_pred(__snake_case , __snake_case , __snake_case , generator=__snake_case )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = output.prev_sample, output.prev_sample_mean
UpperCAmelCase : int = sample_mean.clamp(0 , 1 )
UpperCAmelCase : Union[str, Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase : Optional[Any] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__snake_case )
| 23 | 0 |
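The pipeline above is a predictor-corrector score-SDE (VE) sampler: each timestep runs correct_steps corrector updates (step_correct) followed by one predictor update (step_pred). A minimal sketch of driving it end to end, assuming diffusers' ScoreSdeVePipeline and the google/ncsnpp-church-256 checkpoint (the checkpoint choice is an assumption):

# Sketch: unconditional 256x256 sampling; 2000 steps matches the default above and is slow on CPU.
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(num_inference_steps=2_000).images[0]
image.save("sde_ve_sample.png")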
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =FileLock(str(tmpdir / 'foo.lock' ) )
SCREAMING_SNAKE_CASE =FileLock(str(tmpdir / 'foo.lock' ) )
SCREAMING_SNAKE_CASE =0.01
with locka.acquire():
with pytest.raises(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE =time.time()
locka.acquire(_lowerCAmelCase )
assert time.time() - _start > timeout
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ='''a''' * 1000 + '''.lock'''
SCREAMING_SNAKE_CASE =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(_lowerCAmelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
SCREAMING_SNAKE_CASE =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_lowerCAmelCase ):
locka.acquire(0 )
| 334 |
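The two tests above pin down the behaviors that matter in practice: a second acquire on a held lock raises Timeout, and over-long lock names are truncated to a 255-byte basename. A sketch of the first behavior against the standalone filelock package (an assumption that the vendored datasets copy behaves like upstream, which is what the test relies on):

from filelock import FileLock, Timeout

lock = FileLock("foo.lock")
with lock:                                # lock held for the duration of this block
    contender = FileLock("foo.lock")
    try:
        contender.acquire(timeout=0.01)   # separate instance, same file -> times out
    except Timeout:
        print("lock is busy")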
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = """MCTCTFeatureExtractor"""
lowerCamelCase__ = """AutoTokenizer"""
def __init__( self : Dict , __snake_case : Optional[int] , __snake_case : List[str] ) -> str:
super().__init__(__snake_case , __snake_case )
UpperCAmelCase : List[Any] = self.feature_extractor
UpperCAmelCase : Union[str, Any] = False
def __call__( self : Any , *__snake_case : List[str] , **__snake_case : Any ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
UpperCAmelCase : int = kwargs.pop('''raw_speech''' )
else:
UpperCAmelCase : Union[str, Any] = kwargs.pop('''audio''' , __snake_case )
UpperCAmelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __snake_case )
UpperCAmelCase : Dict = kwargs.pop('''text''' , __snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase : Any = args[0]
UpperCAmelCase : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
UpperCAmelCase : List[str] = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
UpperCAmelCase : int = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase : str = encodings['''input_ids''']
return inputs
def A ( self : List[Any] , *__snake_case : List[Any] , **__snake_case : List[Any] ) -> str:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def A ( self : List[Any] , *__snake_case : int , **__snake_case : Optional[int] ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__snake_case , **__snake_case )
UpperCAmelCase : List[Any] = kwargs.pop('''input_features''' , __snake_case )
UpperCAmelCase : Optional[Any] = kwargs.pop('''labels''' , __snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase : List[str] = args[0]
UpperCAmelCase : List[Any] = args[1:]
if input_features is not None:
UpperCAmelCase : Tuple = self.feature_extractor.pad(__snake_case , *__snake_case , **__snake_case )
if labels is not None:
UpperCAmelCase : Optional[int] = self.tokenizer.pad(__snake_case , **__snake_case )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase : List[str] = labels['''input_ids''']
return input_features
def A ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : Optional[int] ) -> Optional[Any]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@contextmanager
def A ( self : Any ) -> Optional[int]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call).''' )
UpperCAmelCase : Dict = True
UpperCAmelCase : List[Any] = self.tokenizer
yield
UpperCAmelCase : Tuple = self.feature_extractor
UpperCAmelCase : List[Any] = False
| 23 | 0 |
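The processor above routes audio through the feature extractor and text through the tokenizer, folding the tokenized ids into the result as labels. A minimal sketch of the call pattern (the checkpoint name is an assumption, the waveform is synthetic, and the class's import location varies across transformers versions):

# Sketch: a second of silence at 16 kHz stands in for real speech.
import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
speech = np.zeros(16_000, dtype=np.float32)
inputs = processor(audio=speech, sampling_rate=16_000, text="hello world")
print(inputs.keys())   # audio features plus "labels" holding the tokenized text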
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_lowerCAmelCase : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
SCREAMING_SNAKE_CASE = 1_0_0_0_0
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
class __magic_name__ ( datasets.ArrowBasedBuilder ):
SCREAMING_SNAKE_CASE = ParquetConfig
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__a =dl_manager.download_and_extract(self.config.data_files )
if isinstance(__snake_case , (str, list, tuple) ):
__a =data_files
if isinstance(__snake_case , __snake_case ):
__a =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__a =[dl_manager.iter_files(__snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
__a =[]
for split_name, files in data_files.items():
if isinstance(__snake_case , __snake_case ):
__a =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__a =[dl_manager.iter_files(__snake_case ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__snake_case ):
with open(__snake_case , 'rb' ) as f:
__a =datasets.Features.from_arrow_schema(pq.read_schema(__snake_case ) )
break
splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={'files': files} ) )
return splits
def __magic_name__ ( self , __snake_case ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__a =table_cast(__snake_case , self.info.features.arrow_schema )
return pa_table
def __magic_name__ ( self , __snake_case ) -> Dict:
'''simple docstring'''
__a =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
with open(__snake_case , 'rb' ) as f:
__a =pq.ParquetFile(__snake_case )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__a =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(__snake_case )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(__snake_case )}: {e}' )
raise
| 218 |
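The builder never materializes a whole file: its table generator walks pq.ParquetFile.iter_batches and wraps each record batch in a small Table. The same bounded-memory read pattern in isolation (the path is a placeholder):

import pyarrow as pa
import pyarrow.parquet as pq

with open("data.parquet", "rb") as f:              # placeholder path
    parquet_file = pq.ParquetFile(f)
    for record_batch in parquet_file.iter_batches(batch_size=10_000):
        table = pa.Table.from_batches([record_batch])
        print(table.num_rows)                      # each table stays batch-sized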
'''simple docstring'''
from math import isclose, sqrt
def snake_case_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> tuple[float, float, float]:
UpperCAmelCase : Optional[int] = point_y / 4 / point_x
UpperCAmelCase : str = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
UpperCAmelCase : Any = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
UpperCAmelCase : Union[str, Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
UpperCAmelCase : Union[str, Any] = outgoing_gradient**2 + 4
UpperCAmelCase : Dict = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
UpperCAmelCase : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
UpperCAmelCase : List[str] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
UpperCAmelCase : Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
UpperCAmelCase : Optional[Any] = x_minus if isclose(_lowerCAmelCase , _lowerCAmelCase ) else x_plus
UpperCAmelCase : Union[str, Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case_ ( _lowerCAmelCase : float = 1.4 , _lowerCAmelCase : float = -9.6 ) -> int:
UpperCAmelCase : int = 0
UpperCAmelCase : float = first_x_coord
UpperCAmelCase : float = first_y_coord
UpperCAmelCase : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = next_point(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
| 23 | 0 |
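A quick geometric sanity check on the solver: the ellipse is 4x^2 + y^2 = 100 and the start point (1.4, -9.6) lies on it (4 * 1.96 + 92.16 = 100). Because the next-point routine solves a quadratic derived from that same equation, every bounce must land back on the ellipse. A sketch, using next_point, the de-obfuscated name the loop above calls:

from math import isclose

x, y = 1.4, -9.6
gradient = (10.1 - y) / (0.0 - x)            # incoming beam through the gap at (0, 10.1)
for _ in range(5):
    x, y, gradient = next_point(x, y, gradient)
    assert isclose(4 * x * x + y * y, 100)   # still on 4x^2 + y^2 = 100
# solution() then just counts bounces until the beam exits through |x| <= 0.01, y > 0;
# for reference, the accepted Project Euler 144 answer is 354.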
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Tuple = logging.get_logger(__name__)
_A : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class _lowercase ( A__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Any = """mra"""
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Tuple=5_02_65 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_12 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Dict=0.0_2 , SCREAMING_SNAKE_CASE__ : int=1e-5 , SCREAMING_SNAKE_CASE__ : str="absolute" , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : List[str]="full" , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , **SCREAMING_SNAKE_CASE__ : int , ) -> str:
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = block_per_row
__lowerCAmelCase = approx_mode
__lowerCAmelCase = initial_prior_first_n_blocks
__lowerCAmelCase = initial_prior_diagonal_n_blocks
| 229 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__: str = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: int = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: Union[str, Any] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: int = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCamelCase__: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
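The try/except ladders above feed a single _LazyModule so that torch- or TF-only symbols import on first attribute access rather than at package import. The core of that trick in miniature (a generic sketch, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> fully qualified submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])  # heavy import happens here
        value = getattr(module, attr)
        setattr(self, attr, value)   # cache so __getattr__ is hit only once per name
        return value

# LazyModule("pkg", {"pkg.heavy": ["BigModel"]}).BigModel imports pkg.heavy only when touched.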
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Union[str, Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case=True ) -> Any:
'''simple docstring'''
model.train()
lowerCamelCase__ = model(_lowerCAmelCase )
lowerCamelCase__ = F.mse_loss(_lowerCAmelCase ,target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCAmelCase )
def lowerCAmelCase__(__snake_case ,__snake_case=False ) -> str:
'''simple docstring'''
set_seed(42 )
lowerCamelCase__ = RegressionModel()
lowerCamelCase__ = deepcopy(_lowerCAmelCase )
lowerCamelCase__ = RegressionDataset(length=80 )
lowerCamelCase__ = DataLoader(_lowerCAmelCase ,batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase__ = AdamW(params=model.parameters() ,lr=1E-3 )
lowerCamelCase__ = AdamW(params=ddp_model.parameters() ,lr=1E-3 )
lowerCamelCase__ = LambdaLR(_lowerCAmelCase ,lr_lambda=lambda epoch : epoch**0.6_5 )
lowerCamelCase__ = LambdaLR(_lowerCAmelCase ,lr_lambda=lambda epoch : epoch**0.6_5 )
# Prepare the DDP copies (and, if requested, the optimizers/schedulers) with the accelerator
if sched:
lowerCamelCase__ = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
else:
lowerCamelCase__ = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = get_training_setup(_lowerCAmelCase )
# Use a single batch
lowerCamelCase__ = next(iter(_lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform the ground-truth step on the plain (non-DDP) model
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCAmelCase ):
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
else:
# Sync grads
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad ,ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase__ = ddp_input[torch.randperm(len(_lowerCAmelCase ) )]
def lowerCAmelCase__(__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = get_training_setup(_lowerCAmelCase )
# Use a single batch
lowerCamelCase__ = next(iter(_lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform the ground-truth step on the plain (non-DDP) model
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCAmelCase ):
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
else:
# Sync grads
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase__ = ddp_input[torch.randperm(len(_lowerCAmelCase ) )]
def lowerCAmelCase__(__snake_case=False ,__snake_case=False ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = Accelerator(
split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase ,gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase__ = get_training_setup(_lowerCAmelCase )
for iteration, batch in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform the ground-truth step on the plain (non-DDP) model
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCAmelCase ):
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase__ = ddp_input[torch.randperm(len(_lowerCAmelCase ) )]
GradientState._reset_state()
def lowerCAmelCase__(__snake_case=False ,__snake_case=False ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = Accelerator(
split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase ,gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase__ = get_training_setup(_lowerCAmelCase ,_lowerCAmelCase )
for iteration, batch in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform the ground-truth step on the plain (non-DDP) model
model.train()
ddp_model.train()
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCAmelCase ):
step_model(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
lowerCamelCase__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def lowerCAmelCase__() -> Tuple:
'''simple docstring'''
lowerCamelCase__ = Accelerator()
lowerCamelCase__ = RegressionDataset(length=80 )
lowerCamelCase__ = DataLoader(_lowerCAmelCase ,batch_size=16 )
lowerCamelCase__ = RegressionDataset(length=96 )
lowerCamelCase__ = DataLoader(_lowerCAmelCase ,batch_size=16 )
lowerCamelCase__ = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCAmelCase )
if iteration < len(_lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCAmelCase )
if batch_num < len(_lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase__() -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = Accelerator()
lowerCamelCase__ = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(_lowerCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(_lowerCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' ,F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' ,)
test_gradient_accumulation(_lowerCAmelCase ,_lowerCAmelCase )
# Currently will break on torch 2.0+, need to investigate why
if is_torch_version('''<''' ,'''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' ,'''`split_batches=False`, `dispatch_batches=False`**''' ,)
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' ,F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' ,)
test_gradient_accumulation_with_opt_and_scheduler(_lowerCAmelCase ,_lowerCAmelCase )
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 209 |
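Every test above is probing one public pattern: wrapping each step in accelerator.accumulate, which decides per step whether gradients sync and whether the optimizer may really step. The canonical loop shape, as a self-contained sketch (the toy model and data are placeholders):

import torch
import torch.nn.functional as F
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = torch.utils.data.DataLoader(
    [(torch.randn(4), torch.randn(1)) for _ in range(8)], batch_size=2
)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):   # skips grad sync except on accumulation boundaries
        loss = F.mse_loss(model(inputs), targets)
        accelerator.backward(loss)        # handles scaling instead of raw loss.backward()
        optimizer.step()                  # the prepared optimizer skips non-boundary steps
        optimizer.zero_grad()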
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = AltDiffusionPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def A ( self : Dict ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase : Dict = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
UpperCAmelCase : List[Any] = CLIPTextModel(__snake_case )
UpperCAmelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCAmelCase : Optional[int] = 77
UpperCAmelCase : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : Optional[Any] , __snake_case : Dict , __snake_case : List[str]=0 ) -> Union[str, Any]:
if str(__snake_case ).startswith('''mps''' ):
UpperCAmelCase : str = torch.manual_seed(__snake_case )
else:
UpperCAmelCase : Tuple = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Union[str, Any] ) -> List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def A ( self : Tuple ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A ( self : Tuple ) -> Optional[int]:
UpperCAmelCase : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Any = self.get_dummy_components()
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase : List[str] = RobertaSeriesModelWithTransformation(__snake_case )
UpperCAmelCase : str = text_encoder
UpperCAmelCase : Optional[int] = AltDiffusionPipeline(**__snake_case )
UpperCAmelCase : str = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__snake_case )
UpperCAmelCase : Optional[int] = '''A photo of an astronaut'''
UpperCAmelCase : List[Any] = alt_pipe(**__snake_case )
UpperCAmelCase : Optional[Any] = output.images
UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : int ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : int = PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
UpperCAmelCase : int = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase : Union[str, Any] = RobertaSeriesModelWithTransformation(__snake_case )
UpperCAmelCase : Union[str, Any] = text_encoder
UpperCAmelCase : Optional[int] = AltDiffusionPipeline(**__snake_case )
UpperCAmelCase : Dict = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : int = self.get_dummy_inputs(__snake_case )
UpperCAmelCase : Optional[int] = alt_pipe(**__snake_case )
UpperCAmelCase : Optional[int] = output.images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Optional[int] = np.array(
[0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ) -> Any:
# make sure the PNDM scheduler skips the PRK steps here
UpperCAmelCase : List[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__snake_case )
UpperCAmelCase : Tuple = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : List[Any] = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : Any = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = alt_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[str] = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Tuple ) -> int:
UpperCAmelCase : int = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
UpperCAmelCase : Tuple = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__snake_case , safety_checker=__snake_case )
UpperCAmelCase : Dict = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Tuple = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : List[Any] = alt_pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''numpy''' )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 23 | 0 |
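Outside the harness, the slow tests above reduce to a short generation script. The checkpoint name is the one the tests use; GPU availability and the output path are assumptions:

import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    num_inference_steps=20,
).images[0]
image.save("alt_diffusion.png")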
"""simple docstring"""
from __future__ import annotations
from typing import Any
def _UpperCAmelCase ( __lowerCamelCase : list[Any] ) -> None:
create_state_space_tree(_lowerCAmelCase , [] , 0 )
def _UpperCAmelCase ( __lowerCamelCase : list[Any] , __lowerCamelCase : list[Any] , __lowerCamelCase : int ) -> None:
if index == len(_lowerCAmelCase ):
print(_lowerCAmelCase )
return
create_state_space_tree(_lowerCAmelCase , _lowerCAmelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_lowerCAmelCase , _lowerCAmelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
UpperCAmelCase__ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 288 |
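At each index the recursion explores the exclude branch before the include branch, so the print order is deterministic. Tracing a two-element input (using generate_all_subsequences, the de-obfuscated name the demo block calls):

# generate_all_subsequences([1, 2]) prints, in order:
#   []        exclude 1, exclude 2
#   [2]       exclude 1, include 2
#   [1]       include 1, exclude 2
#   [1, 2]    include 1, include 2
generate_all_subsequences([1, 2])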
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> Optional[int]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ) -> Dict:
UpperCAmelCase : Dict = tmp_path / '''cache'''
UpperCAmelCase : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : str = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ) -> Optional[int]:
UpperCAmelCase : Any = tmp_path / '''cache'''
UpperCAmelCase : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Any = features.copy() if features else default_expected_features
UpperCAmelCase : List[Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Dict = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ) -> Tuple:
UpperCAmelCase : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase : Optional[int] = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCAmelCase : int = features.copy() if features else default_expected_features
UpperCAmelCase : Any = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> Union[str, Any]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase : Tuple = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCAmelCase : List[str] = features.copy()
UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = tmp_path / '''cache'''
UpperCAmelCase : List[str] = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> Optional[Any]:
UpperCAmelCase : Any = tmp_path / '''cache'''
UpperCAmelCase : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : List[Any] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Dict:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase : str = jsonl_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase : Dict = [jsonl_path]
UpperCAmelCase : int = tmp_path / '''cache'''
UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Optional[int] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=("train",) ) -> Union[str, Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
UpperCAmelCase : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ) -> Any:
UpperCAmelCase : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Optional[int] = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> int:
UpperCAmelCase : Dict = tmp_path / '''cache'''
UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Optional[int] = features.copy() if features else default_expected_features
UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = JsonDatasetReader({'''train''': jsonl_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ) -> Union[str, Any]:
if split:
UpperCAmelCase : Optional[int] = {split: jsonl_path}
else:
UpperCAmelCase : Any = '''train'''
UpperCAmelCase : Any = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCAmelCase : Tuple = tmp_path / '''cache'''
UpperCAmelCase : int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : Optional[Any] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def snake_case_ ( _lowerCAmelCase : List[str] ) -> str:
return json.load(_lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : Dict ) -> str:
return [json.loads(line) for line in buffer]
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
UpperCAmelCase : Union[str, Any] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[Any] ) -> List[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
UpperCAmelCase : Union[str, Any] = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def A ( self : str , __snake_case : str , __snake_case : str , __snake_case : int ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase : Any = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def A ( self : Any , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase : List[str] = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def A ( self : List[Any] , __snake_case : str ) -> Dict:
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def A ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict ) -> Union[str, Any]:
UpperCAmelCase : List[str] = tmp_path_factory.mktemp('''data''' ) / F"""test.json.{extension}"""
UpperCAmelCase : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f:
UpperCAmelCase : str = f.read()
with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f:
UpperCAmelCase : Optional[int] = f.read()
assert exported_content == original_content
| 23 | 0 |
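JsonDatasetWriter is internal API; the public surface these tests guard is Dataset.to_json, which forwards lines/orient/num_proc the same way. A sketch with placeholder columns:

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_json("out.jsonl", lines=True)                      # JSON Lines, one object per row
ds.to_json("out.json", lines=False, orient="records")    # a single JSON array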
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : str = {}
class __snake_case ( A__ ):
lowerCAmelCase_ = "llama"
lowerCAmelCase_ = ["past_key_values"]
def __init__( self : str , _lowercase : Any=3_20_00 , _lowercase : int=40_96 , _lowercase : Optional[Any]=1_10_08 , _lowercase : int=32 , _lowercase : Any=32 , _lowercase : Any=None , _lowercase : Union[str, Any]="silu" , _lowercase : List[str]=20_48 , _lowercase : Optional[int]=0.02 , _lowercase : Any=1E-6 , _lowercase : Tuple=True , _lowercase : int=0 , _lowercase : int=1 , _lowercase : Any=2 , _lowercase : Dict=1 , _lowercase : Any=False , _lowercase : Any=None , **_lowercase : List[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = num_key_value_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = rms_norm_eps
SCREAMING_SNAKE_CASE__ = pretraining_tp
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , tie_word_embeddings=__snake_case , **__snake_case , )
def __a ( self : int ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
SCREAMING_SNAKE_CASE__ = self.rope_scaling.get("""type""" , __snake_case )
SCREAMING_SNAKE_CASE__ = self.rope_scaling.get("""factor""" , __snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__snake_case , __snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 219 |
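The rope-scaling validator above accepts exactly a two-key dict with type in {linear, dynamic} and factor a float greater than 1. One passing and one failing construction (a sketch against the class defined above):

from transformers import LlamaConfig

ok = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # validates cleanly
try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})    # factor <= 1.0 -> ValueError
except ValueError as err:
    print(err)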
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__: Tuple = logging.get_logger(__name__)
UpperCamelCase__: Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCamelCase__: Optional[int] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
UpperCamelCase__: Dict = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
UpperCamelCase__: Tuple = "▁"
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
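# Hedged usage sketch (not part of the original file): a typical round-trip with
# the tokenizer above. The checkpoint name assumes the published
# "moussaKam/barthez" weights referenced in the pretrained maps at the top.
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Bonjour le monde").input_ids
#   text = tokenizer.decode(ids)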
| 23 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """Calculate the kinetic energy E_k = 1/2 * m * |v|^2 of a body (SI units)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
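# Hedged usage sketch (not part of the original module): two quick sanity checks
# for the helper above; 0.5 * 10 * 10**2 == 500 and 0.5 * 2 * (-2)**2 == 4.
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(2, -2) == 4.0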
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 49 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCamelCase__: Tuple = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
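# Hedged usage sketch (not part of the original tests): with flax installed, a
# model compared against itself should always report equality.
#
#   config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
#                       num_attention_heads=4, intermediate_size=37)
#   model = FlaxBertModel(config)
#   assert check_models_equal(model, model)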
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 23 | 0 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message):
    """Count how often each uppercase letter occurs in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x):
    return x[0]


def get_frequency_order(message):
    """Return the letters of `message` ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message):
    """Score (0-12) how closely the letter frequencies of `message` match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
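# Hedged usage sketch (not part of the original module): scoring arbitrary text.
# The score is an integer in [0, 12]; larger values mean the letter frequencies
# of the message look more like typical English.
#
#   score = english_freq_match_score("Ladies and gentlemen of the class of '99")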
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5,
                 num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels,
                                          token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")

        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 23 | 0 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
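# Hedged usage sketch (not part of the original script): `make_linear_from_emb`
# builds a Linear layer whose weight tensor is the embedding matrix itself,
# which is how the LM head below reuses the shared embeddings. `demo_emb` is a
# made-up example, not something the conversion uses.
#
#   demo_emb = nn.Embedding(10, 4)
#   lm_head = make_linear_from_emb(demo_emb)  # weights shared with demo_emb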
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
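# Hedged usage sketch (not part of the original script), assuming the file is
# saved and invoked directly as a CLI:
#
#   python convert_bart_checkpoint.py bart.large ./bart-large-dump --hf_config facebook/bart-large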
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 343 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
        expected_dict = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 23 | 0 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 35 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links from main-doc to stable-doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
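# Hedged usage sketch (not part of the original script), assuming it is saved
# as release.py and run from the repository root:
#
#   python release.py                  # pre-release: bump X.Y.Z.dev0 -> X.Y.Z
#   python release.py --patch          # pre-release patch bump
#   python release.py --post_release   # move the branch back to a .dev0 version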
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
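# Hedged usage sketch (not part of the original __init__): with the lazy module
# in place, light imports stay cheap and heavy ones resolve on first access.
#
#   from transformers.models.funnel import FunnelConfig  # no torch/tf import yet
#   from transformers.models.funnel import FunnelModel   # triggers the torch path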
| 122 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
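# Hedged sanity check (not part of the original module): rotating the unit
# x-vector by 90 degrees should land on the unit y-vector.
#
#   >>> rotate(numpy.array([1, 0]), 90).round(10).tolist()
#   [0.0, 1.0]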
def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 23 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 334 |
'''simple docstring'''
from manim import *
class CheckpointLoadingScene(Scene):
    """Illustrates loading a second model into memory with the weights of a single shard."""

    def construct(self):
UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase : Tuple = [mem.copy() for i in range(6 )]
UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase : Dict = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Union[str, Any] = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Optional[Any] = Text('''CPU''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''GPU''' , font_size=24 )
UpperCAmelCase : Dict = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''Model''' , font_size=24 )
UpperCAmelCase : Tuple = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
UpperCAmelCase : Any = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
cpu_targs.append(__snake_case )
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : int = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = Text('''Loaded Checkpoint''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
UpperCAmelCase : Tuple = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase : List[Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) , Write(__snake_case ) )
self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
UpperCAmelCase : Tuple = []
UpperCAmelCase : int = []
for i, rect in enumerate(__snake_case ):
UpperCAmelCase : Any = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
UpperCAmelCase : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
| 23 | 0 |
import os
import sys
import unittest
_lowerCAmelCase : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowerCAmelCase : Optional[Any] = os.path.join(git_repo_path, "src", "transformers")
_lowerCAmelCase : List[Any] = "\n{0} = None\n"
_lowerCAmelCase : Optional[Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
_lowerCAmelCase : List[str] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(__snake_case )
__a =find_backend(' if not is_tokenizers_available():' )
self.assertEqual(__snake_case , 'tokenizers' )
__a =find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(__snake_case , 'tensorflow_text' )
__a =find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(__snake_case , 'sentencepiece_and_tokenizers' )
__a =find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(__snake_case , 'sentencepiece_and_tensorflow_text' )
__a =find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(__snake_case , 'sentencepiece_and_tokenizers_and_vision' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , __snake_case )
self.assertIn('tensorflow_text' , __snake_case )
self.assertIn('sentencepiece_and_tokenizers' , __snake_case )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(__snake_case , '\nCONSTANT = None\n' )
__a =create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
__snake_case , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
__a ='''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a =create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a ='''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a =create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , __snake_case )
| 218 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
UpperCamelCase__: str = None
UpperCamelCase__: int = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
UpperCamelCase__: List[Any] = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : List[Any]=256 ) -> Optional[Any]:
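# Scale the hidden size to roughly 8n/3, apply ffn_dim_multiplier, and round up to a multiple of multiple_of (LLaMA FFN sizing).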
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def snake_case_ ( _lowerCAmelCase : List[str] ) -> str:
with open(_lowerCAmelCase , '''r''' ) as f:
return json.load(f )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any ) -> Optional[Any]:
with open(_lowerCAmelCase , '''w''' ) as f:
json.dump(_lowerCAmelCase , f )
def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any=True ) -> List[Any]:
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase : int = os.path.join(_lowerCAmelCase , '''tmp''' )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase : List[str] = read_json(os.path.join(_lowerCAmelCase , '''params.json''' ) )
UpperCAmelCase : str = NUM_SHARDS[model_size]
UpperCAmelCase : Any = params['''n_layers''']
UpperCAmelCase : str = params['''n_heads''']
UpperCAmelCase : Any = n_heads // num_shards
UpperCAmelCase : List[str] = params['''dim''']
UpperCAmelCase : Optional[Any] = dim // n_heads
UpperCAmelCase : str = 1_0_0_0_0.0
UpperCAmelCase : Optional[int] = 1.0 / (base ** (torch.arange(0 , _lowerCAmelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
UpperCAmelCase : Tuple = params['''n_kv_heads'''] # for GQA / MQA
UpperCAmelCase : Optional[int] = n_heads_per_shard // num_key_value_heads
UpperCAmelCase : Optional[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCAmelCase : List[str] = n_heads
UpperCAmelCase : Optional[int] = n_heads_per_shard
UpperCAmelCase : List[str] = dim
# permute for sliced rotary
def permute(_lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any]=n_heads , _lowerCAmelCase : int=dim , _lowerCAmelCase : Dict=dim ):
return w.view(_lowerCAmelCase , dima // n_heads // 2 , 2 , _lowerCAmelCase ).transpose(1 , 2 ).reshape(_lowerCAmelCase , _lowerCAmelCase )
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase : int = torch.load(os.path.join(_lowerCAmelCase , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
UpperCAmelCase : Optional[Any] = [
torch.load(os.path.join(_lowerCAmelCase , f"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' )
for i in range(_lowerCAmelCase )
]
UpperCAmelCase : Any = 0
UpperCAmelCase : str = {'''weight_map''': {}}
for layer_i in range(_lowerCAmelCase ):
UpperCAmelCase : Optional[Any] = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase : Optional[int] = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase : List[str] = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
UpperCAmelCase : Union[str, Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase : Optional[Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
UpperCAmelCase : str = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(_lowerCAmelCase )] , dim=1 )
UpperCAmelCase : Any = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(_lowerCAmelCase )] , dim=0 )
UpperCAmelCase : str = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(_lowerCAmelCase )] , dim=1 )
UpperCAmelCase : Tuple = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(_lowerCAmelCase )] , dim=0 )
UpperCAmelCase : Any = inv_freq
for k, v in state_dict.items():
UpperCAmelCase : List[Any] = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase : Optional[int] = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase : str = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
UpperCAmelCase : Any = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(_lowerCAmelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(_lowerCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase : Optional[int] = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
# Write configs
UpperCAmelCase : Union[str, Any] = {'''total_size''': param_count * 2}
write_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , '''pytorch_model.bin.index.json''' ) )
UpperCAmelCase : int = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
UpperCAmelCase : Tuple = params['''multiple_of'''] if '''multiple_of''' in params else 256
UpperCAmelCase : Any = LlamaConfig(
hidden_size=_lowerCAmelCase , intermediate_size=compute_intermediate_size(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=_lowerCAmelCase , )
config.save_pretrained(_lowerCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=_lowerCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(_lowerCAmelCase , safe_serialization=_lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ) -> List[str]:
# Initialize the tokenizer based on the `spm` model
UpperCAmelCase : Dict = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
UpperCAmelCase : List[Any] = tokenizer_class(_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
def snake_case_ ( ) -> List[Any]:
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
parser.add_argument(
'''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
parser.add_argument(
'''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
parser.add_argument('''--safe_serialization''' , type=_lowerCAmelCase , help='''Whether or not to save using `safetensors`.''' )
UpperCAmelCase : List[Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
UpperCAmelCase : Optional[int] = os.path.join(args.input_dir , '''tokenizer.model''' )
write_tokenizer(args.output_dir , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 23 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_A : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase_ ( snake_case_ : Union[List, PIL.Image.Image, torch.Tensor] ) -> List[str]:
'''simple docstring'''
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , _lowerCAmelCase , )
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
__lowerCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__lowerCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
__lowerCAmelCase = np.concatenate(_lowerCAmelCase , axis=0 )
__lowerCAmelCase = np.array(_lowerCAmelCase ).astype(np.floataa ) / 2_55.0
__lowerCAmelCase = image.transpose(0 , 3 , 1 , 2 )
__lowerCAmelCase = 2.0 * image - 1.0
__lowerCAmelCase = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
__lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
return image
def UpperCamelCase_ ( snake_case_ : Union[List, PIL.Image.Image, torch.Tensor] ) -> Tuple:
'''simple docstring'''
if isinstance(_lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
__lowerCAmelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = mask[0].size
__lowerCAmelCase , __lowerCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCAmelCase = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
__lowerCAmelCase = np.concatenate(_lowerCAmelCase , axis=0 )
__lowerCAmelCase = mask.astype(np.floataa ) / 2_55.0
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = torch.from_numpy(_lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
__lowerCAmelCase = torch.cat(_lowerCAmelCase , dim=0 )
return mask
class _lowercase ( A__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict = 42
_SCREAMING_SNAKE_CASE : int = 42
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[torch.Tensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE__ : Union[torch.Tensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE__ : int = 2_50 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
__lowerCAmelCase = image
__lowerCAmelCase = _preprocess_image(__snake_case )
__lowerCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCAmelCase = _preprocess_mask(__snake_case )
__lowerCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__lowerCAmelCase = original_image.shape
__lowerCAmelCase = randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__snake_case , __snake_case , __snake_case , self.device )
__lowerCAmelCase = eta
__lowerCAmelCase = self.scheduler.timesteps[0] + 1
__lowerCAmelCase = generator[0] if isinstance(__snake_case , __snake_case ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__lowerCAmelCase = self.unet(__snake_case , __snake_case ).sample
# compute previous image: x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__lowerCAmelCase = self.scheduler.undo_step(__snake_case , __snake_case , __snake_case )
__lowerCAmelCase = t
__lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 229 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
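# Low-pass biquad: passes frequencies below the cutoff (Audio EQ Cookbook coefficients).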
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float = 1 / sqrt(2 ) ) -> IIRFilter:
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = cos(_lowerCAmelCase )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : Any = (1 - _cos) / 2
UpperCAmelCase : List[Any] = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : Dict = 1 - alpha
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
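# High-pass biquad: passes frequencies above the cutoff.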
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float = 1 / sqrt(2 ) ) -> IIRFilter:
UpperCAmelCase : Any = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(_lowerCAmelCase )
UpperCAmelCase : Tuple = cos(_lowerCAmelCase )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Tuple = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : Optional[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
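# Band-pass biquad (constant skirt gain variant).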
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float = 1 / sqrt(2 ) ) -> IIRFilter:
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : Optional[int] = sin(_lowerCAmelCase )
UpperCAmelCase : Tuple = cos(_lowerCAmelCase )
UpperCAmelCase : Optional[int] = _sin / (2 * q_factor)
UpperCAmelCase : Union[str, Any] = _sin / 2
UpperCAmelCase : Any = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : Optional[Any] = 1 + alpha
UpperCAmelCase : List[Any] = -2 * _cos
UpperCAmelCase : Optional[Any] = 1 - alpha
UpperCAmelCase : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
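# All-pass biquad: flat magnitude response with a frequency-dependent phase shift.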
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float = 1 / sqrt(2 ) ) -> IIRFilter:
UpperCAmelCase : List[str] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(_lowerCAmelCase )
UpperCAmelCase : str = cos(_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 1 - alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
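# Peaking EQ biquad: boosts or cuts gain_db around the centre frequency.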
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : float = 1 / sqrt(2 ) , ) -> IIRFilter:
UpperCAmelCase : Optional[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(_lowerCAmelCase )
UpperCAmelCase : Optional[int] = cos(_lowerCAmelCase )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : str = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Union[str, Any] = -2 * _cos
UpperCAmelCase : Optional[Any] = 1 - alpha * big_a
UpperCAmelCase : Union[str, Any] = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : Any = 1 - alpha / big_a
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
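# Low-shelf biquad: boosts or cuts gain_db below the corner frequency.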
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : float = 1 / sqrt(2 ) , ) -> IIRFilter:
UpperCAmelCase : Any = tau * frequency / samplerate
UpperCAmelCase : Optional[int] = sin(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = cos(_lowerCAmelCase )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : Optional[int] = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : int = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Optional[int] = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : str = 2 * sqrt(_lowerCAmelCase ) * alpha
UpperCAmelCase : Dict = big_a * (pmc + aaa)
UpperCAmelCase : Any = 2 * big_a * mpc
UpperCAmelCase : Union[str, Any] = big_a * (pmc - aaa)
UpperCAmelCase : Optional[int] = ppmc + aaa
UpperCAmelCase : Optional[Any] = -2 * pmpc
UpperCAmelCase : Optional[Any] = ppmc - aaa
UpperCAmelCase : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
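# High-shelf biquad: boosts or cuts gain_db above the corner frequency.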
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : float = 1 / sqrt(2 ) , ) -> IIRFilter:
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = cos(_lowerCAmelCase )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : int = 10 ** (gain_db / 40)
UpperCAmelCase : List[str] = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Union[str, Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(_lowerCAmelCase ) * alpha
UpperCAmelCase : Any = big_a * (ppmc + aaa)
UpperCAmelCase : str = -2 * big_a * pmpc
UpperCAmelCase : List[Any] = big_a * (ppmc - aaa)
UpperCAmelCase : Optional[Any] = pmc + aaa
UpperCAmelCase : Any = 2 * mpc
UpperCAmelCase : str = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 23 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_a = None
_a = logging.get_logger(__name__)
_a = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_a = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_a = {
"camembert-base": 512,
}
_a = "▁"
class __A ( A__ ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase_ = CamembertTokenizer
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 209 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ) -> bool:
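# 1) Construct the failure array for the pattern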
UpperCAmelCase : str = get_failure_array(_lowerCAmelCase )
# 2) Step through text searching for pattern
UpperCAmelCase , UpperCAmelCase : Optional[Any] = 0, 0 # index into text, pattern
while i < len(_lowerCAmelCase ):
if pattern[j] == text[i]:
if j == (len(_lowerCAmelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCAmelCase : Optional[Any] = failure[j - 1]
continue
i += 1
return False
def snake_case_ ( _lowerCAmelCase : str ) -> list[int]:
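# failure[i] is the length of the longest proper prefix of pattern[: i + 1] that is also a suffix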
UpperCAmelCase : Optional[Any] = [0]
UpperCAmelCase : str = 0
UpperCAmelCase : List[str] = 1
while j < len(_lowerCAmelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCAmelCase : Union[str, Any] = failure[i - 1]
continue
j += 1
failure.append(_lowerCAmelCase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCamelCase__: str = "abc1abc12"
UpperCamelCase__: str = "alskfjaldsabc1abc1abc12k23adsfabcabc"
UpperCamelCase__: Any = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase__: Tuple = "ABABX"
UpperCamelCase__: Union[str, Any] = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
UpperCamelCase__: Any = "AAAB"
UpperCamelCase__: str = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
UpperCamelCase__: int = "abcdabcy"
UpperCamelCase__: Any = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
UpperCamelCase__: List[str] = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 23 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
__a = ["""accelerate""", """launch"""]
__a = Path.home() / """.cache/huggingface/accelerate"""
__a = """default_config.yaml"""
__a = config_folder / config_file
__a = config_folder / """_default_config.yaml"""
__a = Path("""tests/test_configs""" )
@classmethod
def lowercase ( cls : int ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase ( cls : Dict ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase ( self : Union[str, Any] ):
_snake_case = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase ( self : Any ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__snake_case ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__snake_case ), self.test_file_path] , env=os.environ.copy() )
def lowercase ( self : str ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
__a = """test-tpu"""
__a = """us-central1-a"""
__a = """ls"""
__a = ["""accelerate""", """tpu-config"""]
__a = """cd /usr/share"""
__a = """tests/test_samples/test_command_file.sh"""
__a = """Running gcloud compute tpus tpu-vm ssh"""
def lowercase ( self : List[Any] ):
_snake_case = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def lowercase ( self : Union[str, Any] ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def lowercase ( self : str ):
_snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__snake_case )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : str ):
_snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def lowercase ( self : Optional[int] ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all''' , __snake_case , )
def lowercase ( self : Union[str, Any] ):
_snake_case = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : Tuple ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : int ):
_snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
def lowercase ( self : Union[str, Any] ):
_snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all''' , __snake_case , )
| 288 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__: int = logging.get_logger(__name__)
UpperCamelCase__: Dict = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCamelCase__: Optional[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def snake_case_ ( _lowerCAmelCase : str ) -> Union[str, Any]:
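# Build an id2label-style mapping: line number -> first token on that line.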
UpperCAmelCase : Optional[int] = {}
with open(_lowerCAmelCase , '''r''' ) as file:
for line_number, line in enumerate(file ):
UpperCAmelCase : List[str] = line.strip()
if line:
UpperCAmelCase : str = line.split()
UpperCAmelCase : Union[str, Any] = line_number
UpperCAmelCase : List[Any] = words[0]
UpperCAmelCase : Union[str, Any] = value
return result
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ) -> int:
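# Walk the dotted key to the target attribute on the HF model and copy the fairseq tensor into it.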
for attribute in key.split('''.''' ):
UpperCAmelCase : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Dict = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCAmelCase ):
UpperCAmelCase : Any = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : Dict = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : List[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : Optional[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : int = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase : Union[str, Any] = value[0]
else:
UpperCAmelCase : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase : int = value
elif weight_type == "weight_g":
UpperCAmelCase : str = value
elif weight_type == "weight_v":
UpperCAmelCase : Dict = value
elif weight_type == "bias":
UpperCAmelCase : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : int = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = value
else:
UpperCAmelCase : Tuple = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] ) -> List[Any]:
UpperCAmelCase : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCAmelCase ):
UpperCAmelCase : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : Any = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : Optional[int] = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : Optional[int] = '''.'''.join([key, hf_param_name] )
else:
UpperCAmelCase : List[Any] = key
UpperCAmelCase : Tuple = value if '''lm_head''' in full_key else value[0]
UpperCamelCase__: Tuple = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Optional[Any]=None ) -> int:
UpperCAmelCase : List[Any] = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase : Tuple = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
UpperCAmelCase : List[Any] = mapped_key.replace('''*''' , _lowerCAmelCase )
if "weight_g" in name:
UpperCAmelCase : str = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase : int = '''weight_v'''
elif "bias" in name:
UpperCAmelCase : int = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : List[str] = '''weight'''
else:
UpperCAmelCase : Dict = None
if hf_dict is not None:
rename_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return is_used
return is_used
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ) -> Any:
UpperCAmelCase : Dict = []
UpperCAmelCase : Dict = fairseq_model.state_dict()
UpperCAmelCase : Union[str, Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase : Any = True
else:
UpperCAmelCase : Optional[Any] = load_wavaveca_layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Any = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase : Optional[int] = name.split('''.''' )
UpperCAmelCase : Tuple = int(items[0] )
UpperCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[int]=False ) -> Dict:
if config_path is not None:
UpperCAmelCase : List[str] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
else:
UpperCAmelCase : List[Any] = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase : Optional[Any] = read_txt_into_dict(_lowerCAmelCase )
UpperCAmelCase : Optional[int] = idalabel
UpperCAmelCase : Optional[Any] = WavaVecaForSequenceClassification(_lowerCAmelCase )
UpperCAmelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
feature_extractor.save_pretrained(_lowerCAmelCase )
elif is_finetuned:
if dict_path:
UpperCAmelCase : Dict = Dictionary.load(_lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : Any = target_dict.pad_index
UpperCAmelCase : Tuple = target_dict.bos_index
UpperCAmelCase : Optional[int] = target_dict.eos_index
UpperCAmelCase : Union[str, Any] = len(target_dict.symbols )
UpperCAmelCase : Dict = os.path.join(_lowerCAmelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[str] = 1
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_lowerCAmelCase , vocab_handle )
UpperCAmelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCAmelCase , )
UpperCAmelCase : int = True if config.feat_extract_norm == '''layer''' else False
UpperCAmelCase : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
UpperCAmelCase : str = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = WavaVecaForCTC(_lowerCAmelCase )
else:
UpperCAmelCase : Dict = WavaVecaForPreTraining(_lowerCAmelCase )
if is_finetuned or is_seq_class:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
UpperCAmelCase : Optional[Any] = argparse.Namespace(task='''audio_pretraining''' )
UpperCAmelCase : List[Any] = fairseq.tasks.setup_task(_lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCAmelCase )
UpperCAmelCase : Optional[int] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
UpperCamelCase__: Any = parser.parse_args()
UpperCamelCase__: int = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 23 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __snake_case ( A__ , A__ , A__ , unittest.TestCase ):
lowerCAmelCase_ = StableUnCLIPImgaImgPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ = frozenset([] )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="""v_prediction""" , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL()
SCREAMING_SNAKE_CASE__ = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __a ( self : Dict , _lowercase : int , _lowercase : Union[str, Any]=0 , _lowercase : Union[str, Any]=True ):
"""simple docstring"""
if str(__snake_case ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__snake_case )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if pil_image:
SCREAMING_SNAKE_CASE__ = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE__ = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline(**__snake_case )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__snake_case )
inputs.update({"""image_embeds""": None} )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__snake_case ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turle" , generator=generator , output_type="np" )  # "anime turle" (sic) matches the stored reference output
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_h_img2img( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turle" , generator=generator , output_type="np" )  # "anime turle" (sic) matches the stored reference output
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image , "anime turtle" , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
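# Note on the 7 GB bound above (explanatory, not from the original test):
# enable_sequential_cpu_offload() keeps only the submodule currently running on
# the GPU, so peak CUDA memory stays far below the size of the full fp16 pipeline.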
| 219 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests( unittest.TestCase ):
"""simple docstring"""
    def test_download_keeps_only_flax_weights( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None , cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , "snapshots" ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin" ) for f in files )
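# Why the assertion above works (explanatory note): Flax pipelines serialize their
# weights as msgpack files, so a cache populated from a Flax-only download should
# never contain a PyTorch *.bin shard, even when the repo also hosts one.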
@slow
@require_flax
class FlaxStableDiffusionPipelineSlowTests( unittest.TestCase ):
"""simple docstring"""
    def test_dummy_all_tpus( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1514745 ) < 1E-3
            assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 49947.875 ) < 5E-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(images_pil ) == num_samples
    def test_stable_diffusion_v1_4( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05652401) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2383808.2) ) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim( self ):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=False , steps_offset=1 , )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045043945) ) < 1E-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2347693.5) ) < 5E-1
    def test_jax_memory_efficient_attention( self ):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0 ) , num_samples )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompt )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompt )
        prompt_ids = shard(prompt_ids )
        images_eff = pipeline(prompt_ids , params , prng_seed , jit=True ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1E-2
| 23 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset( path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric( path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info( path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names( path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_infos( path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info( path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 49 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random witnesses."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d exact for the modular exponentiation below
        exp += 1
    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 23 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig(embedding_type="hybrid" )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:  # fixed: `if "nyu" or ...` was always truthy
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" , "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" , "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" , "" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" , "projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" , "head" )
    if "scratch" in name:
        name = name.replace("scratch" , "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" , "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" , "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" , "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" , "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
    if "out_conv" in name:
        name = name.replace("out_conv" , "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" , "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" , "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" , "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" , "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" , "dpt" )
    if "bn" in name:
        name = name.replace("bn" , "batch_norm" )
    if "head" in name:
        name = name.replace("head" , "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" , "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" , "auxiliary_head.head" )
    if "backbone" in name:
        name = name.replace("backbone" , "backbone.bit.encoder" )
    if ".." in name:
        name = name.replace(".." , "." )
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution" , "conv" )
    if "layer" in name and "backbone" in name:
        name = name.replace("layer" , "layers" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
    if "embedder.conv" in name:
        name = name.replace("embedder.conv" , "embedder.convolution" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
    return name
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
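# Explanatory note: timm stores the fused qkv projection as one (3 * hidden, hidden)
# matrix, so the three equal-sized row blocks sliced above become the separate
# query/key/value projections expected by the HF DPT implementation.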
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas" )
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 23 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 100 , rate_of_decrease = 0.01 , threshold_temp = 1 , ) -> Any:
    '''simple docstring'''
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
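# Acceptance-rule recap (explanatory note): a worse neighbor (change <= 0) is still
# accepted with probability e**(change / current_temp), so the search roams widely
# while the temperature is high and degenerates to plain hill climbing as it cools.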
if __name__ == "__main__":
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_SCREAMING_SNAKE_CASE = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
_SCREAMING_SNAKE_CASE = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
_SCREAMING_SNAKE_CASE = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
_SCREAMING_SNAKE_CASE = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
return (3 * x**2) - (6 * y)
_SCREAMING_SNAKE_CASE = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_SCREAMING_SNAKE_CASE = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
_SCREAMING_SNAKE_CASE = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_SCREAMING_SNAKE_CASE = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 343 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCamelCase__: Optional[int] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 23 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n    >>> pipe_prior.to(\"cuda\")\n    >>> prompt = \"red cat, 4k photo\"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n    >>> pipe.to(\"cuda\")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save(\"cat.png\")\n    ```\n"
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
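# Worked example (illustrative): with the default scale_factor=8, the function maps
# a requested pixel size to the matching latent size, rounding up to a multiple of
# scale_factor**2 first: (512, 512) -> (64, 64), and (500, 500) -> (64, 64) as well.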
class KandinskyV22Pipeline( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet : UNet2DConditionModel , scheduler : DDPMScheduler , movq : VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        device = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=False )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 100 , guidance_scale : float = 4.0 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
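# Guidance recap (explanatory note): under classifier-free guidance the latents are
# duplicated, the UNet runs once on the [negative, positive] batch, and the two
# noise predictions are recombined as uncond + guidance_scale * (text - uncond)
# before every scheduler step.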
| 35 |
'''simple docstring'''
from __future__ import annotations
def find_max( nums: list[int | float] , left: int , right: int ) -> int | float:
    """Return the maximum of nums[left:right + 1] using divide and conquer."""
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
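# Illustrative call: find_max([1, 7, 3], 0, 2) returns 7; the recursion splits the
# range at mid and keeps the larger of the two half-range maxima.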
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 23 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
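# Explanatory note: the names are blanked before comparison so that two initializers
# with identical payloads but different names still compare equal; the original
# names are restored right after the protobuf equality check.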
def _node_replace_input_with( node_proto , name , new_name ):
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    """Deduplicate identical initializers in an ONNX file and save the result as optimized_<name>."""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: " , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
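# Typical usage (illustrative): remove_dup_initializers("/path/to/model.onnx")
# writes "optimized_model.onnx" next to the input file and returns its path.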
| 122 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    """simple docstring"""
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet : UNet2DModel , scheduler : ScoreSdeVeScheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 2000 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
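# Sampling recap (explanatory note): SDE-VE sampling alternates Langevin corrector
# steps (step_correct) with reverse-SDE predictor steps (step_pred), and the final
# image is taken from the mean of the last prediction rather than the noisy sample.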
| 23 | 0 |
def mf_knapsack(i, wt, val, j):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j )
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j ), mf_knapsack(i - 1, wt, val, j - wt[i - 1] ) + val[i - 1], )
        f[i][j] = val  # val is rebound to the computed best value before memoizing it
    return f[i][j]
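# Bottom-up recurrence used below (explanatory note): dp[i][w_] is the best value
# over the first i items under capacity w_; item i is either skipped (dp[i-1][w_])
# or taken (val[i-1] + dp[i-1][w_ - wt[i-1]]) whenever it fits.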
def knapsack(w, wt, val, n):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1, n + 1 ):
        for w_ in range(1, w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    """simple docstring"""
    if not (isinstance(wt, (list, tuple) ) and isinstance(val, (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'But got {num_items} weights and {len(val )} values'
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i], int ):
            msg = (
                'All weights must be integers but got weight of '
                f'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w, wt, val, num_items )
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """simple docstring"""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set )
if __name__ == "__main__":
_lowerCamelCase =[3, 2, 4, 4]
_lowerCamelCase =[4, 3, 2, 3]
_lowerCamelCase =4
_lowerCamelCase =6
_lowerCamelCase =[[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_lowerCamelCase =knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
_lowerCamelCase =knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 334 |
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
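
# Typical usage sketch (the checkpoint name is an assumption based on the
# publicly available M-CTC-T weights; not part of the processor source):
#
# processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
# inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
# batch = processor(audio=waveform, sampling_rate=16000, text="transcript")  # adds "labels"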
| 23 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
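
# The fast tests rebuild the RobertaSeries text encoder under a fixed seed so
# results are reproducible. A minimal, self-contained illustration of that
# seeding idiom (a sketch, not from the test file):
#
# torch.manual_seed(0); a = torch.nn.Linear(4, 4)
# torch.manual_seed(0); b = torch.nn.Linear(4, 4)
# assert torch.equal(a.weight, b.weight)  # same seed -> identical init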
| 218 |
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # sin(2 * theta) and cos(2 * theta), where theta is the angle of the normal,
    # via the tangent double-angle identities
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 23 | 0 |
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
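
# Offset mappings, which the add_prefix_space test exercises, pair each token
# with its (start, end) character span. Illustration with the fast tokenizer
# (the checkpoint name is an assumption):
#
# tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
# enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# enc.offset_mapping  # e.g. [(0, 5), (6, 11)]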
| 229 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
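
# Minimal illustration of the lazy-module pattern used above (a simplified
# sketch, not the transformers implementation): attribute access triggers the
# submodule import on first use.
#
# import importlib, types
#
# class LazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#     def __getattr__(self, attr):
#         for submodule, symbols in self._import_structure.items():
#             if attr in symbols:
#                 module = importlib.import_module(f".{submodule}", self.__name__)
#                 return getattr(module, attr)
#         raise AttributeError(f"module {self.__name__} has no attribute {attr}")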
| 23 | 0 |
def average_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of the numbers in ``nums``."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
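
# Worked example (illustrative check, not part of the original module): the
# mean of [2, 4, 6] is 4 and the absolute deviations are 2, 0, 2, so the mean
# absolute deviation is 4 / 3.
#
# assert abs(average_absolute_deviation([2, 4, 6]) - 4 / 3) < 1e-12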
| 209 |
| 23 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase ( self : Optional[Any] ):
_snake_case = torch.nn.Linear(10 , 10 )
_snake_case = torch.optim.SGD(model.parameters() , 0.1 )
_snake_case = Accelerator()
_snake_case = accelerator.prepare(__snake_case )
try:
pickle.loads(pickle.dumps(__snake_case ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
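
# For context (a sketch of the surrounding accelerate API, not part of the
# test): prepare() is normally handed the model, optimizer, and dataloader
# together, returning wrapped versions of each.
#
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)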
| 288 |
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
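
# Round-trip sketch tying the reader and writer together (assumes a small
# in-memory Dataset; not part of the test module):
#
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
# with io.BytesIO() as buf:
#     JsonDatasetWriter(ds, buf, lines=True).write()
#     buf.seek(0)
#     rows = [json.loads(line) for line in buf]
# assert rows[0]["col_1"] == "a"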
| 23 | 0 |
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
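
# Example invocation, using the two arguments defined by the parser above
# (the paths are placeholders):
#
# python convert_vae_pt_to_diffusers.py \
#     --vae_pt_path ./trained_vae.pt \
#     --dump_path ./vae-diffusers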
| 219 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__: Tuple = logging.get_logger(__name__)
UpperCamelCase__: Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCamelCase__: Optional[int] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
UpperCamelCase__: Dict = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
UpperCamelCase__: Tuple = "▁"
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Tuple="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="</s>" , __snake_case : Any="<s>" , __snake_case : Optional[int]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Union[str, Any]="<mask>" , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Dict , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : int = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
UpperCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
UpperCAmelCase : Optional[int] = vocab_file
UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
UpperCAmelCase : int = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
UpperCAmelCase : Optional[Any] = len(self.sp_model ) - 1
UpperCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def A ( self : Tuple , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A ( self : int , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def A ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : Tuple = [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor itself cannot be pickled
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
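
# --- Usage sketch (added; not part of the original module) ---
# A minimal round trip, assuming the class above is the BARThez tokenizer shipped
# with `transformers` (the checkpoint name is taken from the vocab map at the top)
# and that `sentencepiece` is installed; run it where the relative imports resolve.
if __name__ == "__main__":
    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    encoding = tokenizer("Un exemple en français.")
    print(encoding["input_ids"])  # <s> ... </s> ids, using the fairseq offsets above
    print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))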
| 23 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
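
# --- Usage sketch (added; not part of the original module) ---
# Assumes `transformers` is installed and the public RAG checkpoint below exists;
# `from_pretrained` above loads the question-encoder and generator tokenizers
# from their respective subfolders of the checkpoint.
if __name__ == "__main__":
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")
    print(batch["input_ids"].shape)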
| 49 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 23 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was lost in this dump; "ImageProcessor"
    # is a stand-in. The defaults (256 -> 224 bicubic, ImageNet stats) are unchanged.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample=PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
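
# --- Usage sketch (added; not part of the original module) ---
# A minimal preprocessing call, assuming PIL and numpy are available; with the
# defaults above, a 300x400 image is resized to 256x256, center-cropped to
# 224x224, rescaled by 1/255 and normalized with the ImageNet statistics.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = ImageProcessor()
    dummy = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    batch = processor(images=dummy, return_tensors="np")  # __call__ dispatches to preprocess
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)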
| 101 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False

    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 23 | 0 |
def prefix_function(input_string: str) -> list:
    """
    Knuth--Morris--Pratt prefix function: prefix_result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also its suffix.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """
    Longest prefix of the string that reappears as a suffix of one of its prefixes.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
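
# --- Added example (not in the original file): the classic application of the
# prefix function, Knuth-Morris-Pratt substring search. The separator "#" is
# assumed not to occur in either string.
def kmp_find(pattern: str, text: str) -> int:
    """Return the index of the first occurrence of pattern in text, or -1."""
    combined = pattern + "#" + text
    pi = prefix_function(combined)
    for i in range(len(pattern) + 1, len(combined)):
        if pi[i] == len(pattern):
            # a full match of `pattern` ends at position i of `combined`
            return i - 2 * len(pattern)
    return -1


if __name__ == "__main__":
    assert kmp_find("abc", "xxabcyy") == 2
    assert kmp_find("abc", "xyz") == -1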
| 343 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
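
# --- Usage sketch (added; not part of the original test file) ---
# The generic helpers under test dispatch on the tensor type, so the same call
# works for numpy arrays, torch/tf tensors and jax arrays alike.
if __name__ == "__main__":
    x = np.random.randn(1, 3, 4)
    print(transpose(x, axes=(2, 1, 0)).shape)  # (4, 3, 1)
    print(reshape(x, (3, 4)).shape)            # (3, 4)
    print(squeeze(x).shape)                    # (3, 4)
    print(expand_dims(x, axis=0).shape)        # (1, 1, 3, 4)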
| 23 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
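
# --- Added example invocation (comments only; the script name and data paths
# are placeholders, the flags all come from the argparse definition above) ---
# python eval_rag.py \
#     --model_name_or_path facebook/rag-token-nq \
#     --model_type rag_token \
#     --eval_mode e2e \
#     --gold_data_mode qa \
#     --evaluation_set path/to/test.questions \
#     --gold_data_path path/to/gold_data.csv \
#     --predictions_path path/to/predictions.txt \
#     --n_docs 5 \
#     --print_predictions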
| 35 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase__: Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 23 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
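# Editor's note: each returned entry is a (filled_sentence, probability,
# predicted_token) tuple; the concrete tokens and scores depend on the model
# weights, so none are shown here.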
| 122 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
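# Editor's helper (not in the original file): each iteration replaces every
# segment with four shorter ones, so the initial 3 segments grow to 3 * 4**steps.
def expected_segment_count(steps: int, initial_segments: int = 3) -> int:
    return initial_segments * 4**steps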
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
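# Editor's note: with the lazy module installed in sys.modules above, the heavy
# torch/TF imports only run on first attribute access — e.g. touching
# `LxmertModel` on this package triggers the modeling_lxmert import at that
# point, not at package import time.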
| 334 |
'''simple docstring'''
from manim import *
class CheckpointLoadingScene(Scene):
    """Visualizes loading a sharded checkpoint into an empty model (adapted from
    the accelerate big-model-inference animations). Editor's note: the original
    class/method names and the direction/color constants were garbled in this
    copy; Scene/construct and the UP/DOWN/RIGHT/LEFT/YELLOW/BLUE arguments
    below are reconstructed from the surrounding calls and are best-effort
    assumptions."""

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_a = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
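# Editor's note (assumed invocation, not part of the original file): a scene
# like this is typically rendered with `manim -pql <this_file>.py CheckpointLoadingScene`.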
| 23 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
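# Editor's usage note (the checkpoint name is illustrative): each auto class
# dispatches on the checkpoint's config type, e.g.
# FlaxAutoModel.from_pretrained("bert-base-cased") resolves to FlaxBertModel
# through FLAX_MODEL_MAPPING.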
| 218 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
    LlamaTokenizerFast = None

# Editor's note: the name INTERMEDIATE_SIZE_MAP is an assumption; the table is
# not referenced elsewhere in this excerpt, while NUM_SHARDS is (see write_model).
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
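# Editor's worked example: for dim n = 4096 with the defaults,
# int(8 * 4096 / 3) = 10922, and rounding up to a multiple of 256 gives
# 256 * 43 = 11008 — the "7B" entry in the table above.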
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
UpperCAmelCase : Optional[int] = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase : str = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
UpperCAmelCase : Any = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(_lowerCAmelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(_lowerCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase : Optional[int] = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
# Write configs
UpperCAmelCase : Union[str, Any] = {'''total_size''': param_count * 2}
write_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , '''pytorch_model.bin.index.json''' ) )
UpperCAmelCase : int = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
UpperCAmelCase : Tuple = params['''multiple_of'''] if '''multiple_of''' in params else 256
UpperCAmelCase : Any = LlamaConfig(
hidden_size=_lowerCAmelCase , intermediate_size=compute_intermediate_size(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=_lowerCAmelCase , )
config.save_pretrained(_lowerCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=_lowerCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(_lowerCAmelCase , safe_serialization=_lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 23 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 229 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
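# Editor's usage sketch (hedged): IIRFilter from audio_filters.iir_filter is
# assumed to expose `process(sample: float) -> float`; the sample values below
# are illustrative only.
def _demo_lowpass_step_response(cutoff_hz: float = 1000, samplerate: int = 48000) -> list[float]:
    filt = make_lowpass(cutoff_hz, samplerate)
    return [filt.process(sample) for sample in (0.0, 1.0, 1.0, 1.0)]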
| 23 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
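# Editor's note, example pair produced above for layer 0:
# ("blocks.0.norm1.weight", "deit.encoder.layer.0.layernorm_before.weight").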
def read_in_q_k_v(state_dict, config, base_model=False):
    # Editor's note: the target key strings are reconstructed from the standard
    # ViT/DeiT conversion layout and are best-effort.
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 209 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
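# Editor's note, worked example: get_failure_array("aabaabaaa") returns
# [0, 1, 0, 1, 2, 3, 4, 5, 2] (verified by Test 5 below); failure[k] is the
# length of the longest proper prefix of pattern[: k + 1] that is also its suffix.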
if __name__ == "__main__":
# Test 1)
UpperCamelCase__: str = "abc1abc12"
UpperCamelCase__: str = "alskfjaldsabc1abc1abc12k23adsfabcabc"
UpperCamelCase__: Any = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase__: Tuple = "ABABX"
UpperCamelCase__: Union[str, Any] = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
UpperCamelCase__: Any = "AAAB"
UpperCamelCase__: str = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
UpperCamelCase__: int = "abcdabcy"
UpperCamelCase__: Any = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
UpperCamelCase__: List[str] = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 23 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    # Editor's note: the DPTConfig attribute names below are reconstructed from
    # the transformers DPTConfig API and are best-effort; the values are the
    # original file's. The "nyu"/"midas" condition is also fixed — the original
    # `if "nyu" or "midas" in checkpoint_url` was always truthy.
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_snake_case = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
_snake_case = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
_snake_case = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
_snake_case = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
_snake_case = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
_snake_case = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
_snake_case = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
_snake_case = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_snake_case = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
_snake_case = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
_snake_case = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
_snake_case = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
_snake_case = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
_snake_case = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
_snake_case = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
_snake_case = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
_snake_case = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
_snake_case = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_snake_case = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
_snake_case = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
_snake_case = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
_snake_case = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
_snake_case = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
_snake_case = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_snake_case = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
_snake_case = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
_snake_case = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
_snake_case = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_snake_case = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
_snake_case = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
_snake_case = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
_snake_case = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
_snake_case = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
_snake_case = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
_snake_case = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
_snake_case = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
_snake_case = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
_snake_case = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
_snake_case = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
_snake_case = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
_snake_case = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
_snake_case = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
_snake_case = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
_snake_case = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
_snake_case = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
_snake_case = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
_snake_case = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
_snake_case = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
_snake_case = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
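# Added illustration (not part of the original script): the refinenet renaming
# above maps stage index i to abs(i - 4), so refinenet4 -> fusion_stage.layers.0,
# refinenet3 -> layers.1, refinenet2 -> layers.2 and refinenet1 -> layers.3.
assert [abs(i - 4) for i in (4, 3, 2, 1)] == [0, 1, 2, 3]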
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[: config.hidden_size, :]
_snake_case = in_proj_bias[: config.hidden_size]
_snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case = in_proj_weight[
-config.hidden_size :, :
]
_snake_case = in_proj_bias[-config.hidden_size :]
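# Hedged toy check (added for illustration): the fused qkv projection above has
# shape (3 * hidden_size, hidden_size), and the slices take the first, middle
# and last hidden_size rows as query, key and value respectively.
_toy_rows = list(range(6))  # pretend hidden_size == 2
assert (_toy_rows[:2], _toy_rows[2:4], _toy_rows[-2:]) == ([0, 1], [2, 3], [4, 5])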
def _UpperCAmelCase ( ) -> List[str]:
_snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_snake_case = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ) -> Any:
_snake_case = get_dpt_config(_lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_snake_case = torch.load(_lowerCAmelCase , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(_lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
_snake_case = state_dict.pop(_lowerCAmelCase )
_snake_case = val
# read in qkv matrices
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
_snake_case = DPTForSemanticSegmentation(_lowerCAmelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Check outputs on an image
_snake_case = 4_80 if '''ade''' in checkpoint_url else 3_84
_snake_case = DPTImageProcessor(size=_lowerCAmelCase )
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCAmelCase , return_tensors='''pt''' )
# forward pass
_snake_case = model(**_lowerCAmelCase ).logits if '''ade''' in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth
if show_prediction:
_snake_case = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=_lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 288 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__: int = logging.get_logger(__name__)
UpperCamelCase__: Dict = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCamelCase__: Optional[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def snake_case_ ( _lowerCAmelCase : str ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = {}
with open(_lowerCAmelCase , '''r''' ) as file:
for line_number, line in enumerate(_lowerCAmelCase ):
UpperCAmelCase : List[str] = line.strip()
if line:
UpperCAmelCase : str = line.split()
UpperCAmelCase : Union[str, Any] = line_number
UpperCAmelCase : List[Any] = words[0]
UpperCAmelCase : Union[str, Any] = value
return result
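# Hedged example (hypothetical file contents, for illustration): a label file
#     happy
#     sad
# yields {"happy": 0, "sad": 1} -- the first token of each line mapped to its
# zero-based line number, later used as the model's id2label mapping.
assert {word: idx for idx, word in enumerate(["happy", "sad"])} == {"happy": 0, "sad": 1}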
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ) -> int:
for attribute in key.split('''.''' ):
UpperCAmelCase : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Dict = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCAmelCase ):
UpperCAmelCase : Any = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : Dict = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : List[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : Optional[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : int = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase : Union[str, Any] = value[0]
else:
UpperCAmelCase : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase : int = value
elif weight_type == "weight_g":
UpperCAmelCase : str = value
elif weight_type == "weight_v":
UpperCAmelCase : Dict = value
elif weight_type == "bias":
UpperCAmelCase : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : int = getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = value
else:
UpperCAmelCase : Tuple = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] ) -> List[Any]:
UpperCAmelCase : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCAmelCase ):
UpperCAmelCase : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : Any = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : Optional[int] = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : Optional[int] = '''.'''.join([key, hf_param_name] )
else:
UpperCAmelCase : List[Any] = key
UpperCAmelCase : Tuple = value if '''lm_head''' in full_key else value[0]
UpperCamelCase__: Tuple = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Optional[Any]=None ) -> int:
UpperCAmelCase : List[Any] = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase : Tuple = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
UpperCAmelCase : List[Any] = mapped_key.replace('''*''' , _lowerCAmelCase )
if "weight_g" in name:
UpperCAmelCase : str = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase : int = '''weight_v'''
elif "bias" in name:
UpperCAmelCase : int = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : List[str] = '''weight'''
else:
UpperCAmelCase : Dict = None
if hf_dict is not None:
rename_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return is_used
return is_used
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ) -> Any:
UpperCAmelCase : Dict = []
UpperCAmelCase : Dict = fairseq_model.state_dict()
UpperCAmelCase : Union[str, Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase : Any = True
else:
UpperCAmelCase : Optional[Any] = load_wavaveca_layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Any = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase : Optional[int] = name.split('''.''' )
UpperCAmelCase : Tuple = int(items[0] )
UpperCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[int]=False ) -> Dict:
if config_path is not None:
UpperCAmelCase : List[str] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
else:
UpperCAmelCase : List[Any] = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase : Optional[Any] = read_txt_into_dict(_lowerCAmelCase )
UpperCAmelCase : Optional[int] = idalabel
UpperCAmelCase : Optional[Any] = WavaVecaForSequenceClassification(_lowerCAmelCase )
UpperCAmelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
feature_extractor.save_pretrained(_lowerCAmelCase )
elif is_finetuned:
if dict_path:
UpperCAmelCase : Dict = Dictionary.load(_lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : Any = target_dict.pad_index
UpperCAmelCase : Tuple = target_dict.bos_index
UpperCAmelCase : Optional[int] = target_dict.eos_index
UpperCAmelCase : Union[str, Any] = len(target_dict.symbols )
UpperCAmelCase : Dict = os.path.join(_lowerCAmelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[str] = 1
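            # (added note, illustration only) Wav2Vec2CTCTokenizer expects the CTC
            # blank <pad> at index 0, while fairseq dictionaries place <s> there,
            # so the two assignments above swap those indices before vocab.json is dumped.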
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCAmelCase , )
UpperCAmelCase : int = True if config.feat_extract_norm == '''layer''' else False
UpperCAmelCase : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
UpperCAmelCase : str = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = WavaVecaForCTC(_lowerCAmelCase )
else:
UpperCAmelCase : Dict = WavaVecaForPreTraining(_lowerCAmelCase )
if is_finetuned or is_seq_class:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
UpperCAmelCase : Optional[Any] = argparse.Namespace(task='''audio_pretraining''' )
UpperCAmelCase : List[Any] = fairseq.tasks.setup_task(_lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCAmelCase )
UpperCAmelCase : Optional[int] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
UpperCamelCase__: Any = parser.parse_args()
UpperCamelCase__: int = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 23 | 0 |
__lowerCamelCase : List[str] = [0, 2, 4, 6, 8]
__lowerCamelCase : Any = [1, 3, 5, 7, 9]
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] , __UpperCamelCase : int ) -> int:
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
SCREAMING_SNAKE_CASE__ = 0
for digit in range(10 ):
SCREAMING_SNAKE_CASE__ = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , _lowerCAmelCase , _lowerCAmelCase )
return result
SCREAMING_SNAKE_CASE__ = 0
for digita in range(10 ):
SCREAMING_SNAKE_CASE__ = digita
if (remainder + digita) % 2 == 0:
SCREAMING_SNAKE_CASE__ = ODD_DIGITS
else:
SCREAMING_SNAKE_CASE__ = EVEN_DIGITS
for digita in other_parity_digits:
SCREAMING_SNAKE_CASE__ = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCAmelCase , _lowerCAmelCase , )
return result
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 9 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(_lowerCAmelCase , 0 , [0] * length , _lowerCAmelCase )
return result
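# Worked example (added for illustration): 36 is reversible because
# 36 + 63 = 99 and every digit of 99 is odd; in total there are exactly 20
# two-digit reversible numbers, i.e. the length-2 term of the sum above is 20.
assert all(int(d) % 2 == 1 for d in str(36 + 63))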
if __name__ == "__main__":
print(F"""{solution() = }""")
| 219 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case , cache_dir=__snake_case )
UpperCAmelCase : str = [t[-1] for t in os.walk(os.path.join(__snake_case , os.listdir(__snake_case )[0] , '''snapshots''' ) )]
UpperCAmelCase : str = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
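# Note added for illustration: the slow tests below all follow the same
# data-parallel pattern -- model params are broadcast to every device with
# `replicate`, and the prompt batch plus PRNG keys are split per device with
# `shard` / `jax.random.split` before the jitted (pmapped) pipeline call.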
@slow
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : str = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case )
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Tuple = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[Any] = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(__snake_case , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__snake_case ) == num_samples
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : Any = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : int = num_samples * [prompt]
UpperCAmelCase : Union[str, Any] = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : int = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Tuple = shard(__snake_case )
UpperCAmelCase : Tuple = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def A ( self : int ) -> Dict:
UpperCAmelCase , UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : List[str] = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : List[Any] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : int ) -> Any:
UpperCAmelCase , UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
UpperCAmelCase : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[int] = jax.device_count()
UpperCAmelCase : List[str] = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : str = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : int = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__snake_case , safety_checker=__snake_case , )
UpperCAmelCase : Tuple = scheduler.create_state()
UpperCAmelCase : Dict = scheduler_state
UpperCAmelCase : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : int = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Any = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : str = replicate(__snake_case )
UpperCAmelCase : List[str] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Dict = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def A ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : str = jax.random.split(jax.random.PRNGKey(0 ) , __snake_case )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , )
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[str] = shard(__snake_case )
UpperCAmelCase : Any = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : Optional[int] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase , UpperCAmelCase : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , use_memory_efficient_attention=__snake_case , )
UpperCAmelCase : int = replicate(__snake_case )
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[Any] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : int = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 23 | 0 |
import argparse
import copy
def __snake_case ( _UpperCAmelCase ):
__a = {}
with open(_lowerCAmelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__a = []
_list.append([line.split()[1], line.split()[2]] )
__a = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__a = []
_list.append([line.split()[0], line.split()[2]] )
__a = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
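# Hypothetical worked example (illustration only): an input file with the lines
#     a b 20
#     a c 18
# produces the undirected adjacency dict below, mapping each node to its
# [neighbour, weight-as-string] pairs.
_toy_neighbours = {"a": [["b", "20"], ["c", "18"]], "b": [["a", "20"]], "c": [["a", "18"]]}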
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
with open(_lowerCAmelCase ) as f:
__a = f.read(1 )
__a = start_node
__a = []
__a = start_node
__a = 0
while visiting not in first_solution:
__a = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(_lowerCAmelCase ) and k[0] not in first_solution:
__a = k[1]
__a = k[0]
first_solution.append(_lowerCAmelCase )
__a = distance_of_first_solution + int(_lowerCAmelCase )
__a = best_node
first_solution.append(_lowerCAmelCase )
__a = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__a = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
for n in solution[1:-1]:
__a = solution.index(_lowerCAmelCase )
for kn in solution[1:-1]:
__a = solution.index(_lowerCAmelCase )
if n == kn:
continue
__a = copy.deepcopy(_lowerCAmelCase )
__a = kn
__a = n
__a = 0
for k in _tmp[:-1]:
__a = _tmp[_tmp.index(_lowerCAmelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__a = distance + int(i[1] )
_tmp.append(_lowerCAmelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__a = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda _UpperCAmelCase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
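# Added note (illustration): every neighbour built above is a candidate tour with
# its total distance appended as the final element, and the neighbourhood is
# sorted ascending on that distance, so index 0 holds the cheapest 2-node swap.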
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = 1
__a = first_solution
__a = []
__a = distance_of_first_solution
__a = solution
while count <= iters:
__a = find_neighborhood(_lowerCAmelCase , _lowerCAmelCase )
__a = 0
__a = neighborhood[index_of_best_solution]
__a = len(_lowerCAmelCase ) - 1
__a = False
while not found:
__a = 0
while i < len(_lowerCAmelCase ):
if best_solution[i] != solution[i]:
__a = best_solution[i]
__a = solution[i]
break
__a = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__a = True
__a = best_solution[:-1]
__a = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__a = cost
__a = solution
else:
__a = index_of_best_solution + 1
__a = neighborhood[index_of_best_solution]
if len(_lowerCAmelCase ) >= size:
tabu_list.pop(0 )
__a = count + 1
return best_solution_ever, best_cost
def __snake_case ( _UpperCAmelCase=None ):
__a = generate_neighbours(args.File )
__a = generate_first_solution(
args.File , _lowerCAmelCase )
__a = tabu_search(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.Iterations , args.Size , )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 49 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n : int , prec : int = 1000 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
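# Worked example (added for illustration): for n = 13 we get n - 1 = 3 * 2**2,
# so d = 3 and exp = 2; the base a = 2 is not a witness because
# 2**3 % 13 == 8 != 1 but 2**6 % 13 == 12 == n - 1.
assert pow(2, 3, 13) == 8 and pow(2, 6, 13) == 12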
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 23 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowercase ( unittest.TestCase ):
def A__ ( self):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowercase = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__snake_case ,cache_dir=__snake_case)
lowercase = [t[-1] for t in os.walk(os.path.join(__snake_case ,os.listdir(__snake_case)[0] ,'''snapshots'''))]
lowercase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''') for f in files)
@slow
@require_flax
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__snake_case)
lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase = jax.random.PRNGKey(0)
lowercase = 4
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = pipeline.prepare_inputs(__snake_case)
# shard inputs and rng
lowercase = replicate(__snake_case)
lowercase = jax.random.split(__snake_case ,__snake_case)
lowercase = shard(__snake_case)
lowercase = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa).sum() - 4.1514745) < 1E-3
assert np.abs(np.abs(__snake_case ,dtype=np.floataa).sum() - 4_9_9_4_7.8_7_5) < 5E-1
lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(__snake_case) == num_samples
def A__ ( self):
lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''flax''' ,safety_checker=__snake_case)
lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase = jax.random.PRNGKey(0)
lowercase = 5_0
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = pipeline.prepare_inputs(__snake_case)
# shard inputs and rng
lowercase = replicate(__snake_case)
lowercase = jax.random.split(__snake_case ,__snake_case)
lowercase = shard(__snake_case)
lowercase = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa).sum() - 0.05652401)) < 1E-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa).sum() - 2_3_8_3_8_0_8.2)) < 5E-1
def A__ ( self):
lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__snake_case)
lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase = jax.random.PRNGKey(0)
lowercase = 5_0
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = pipeline.prepare_inputs(__snake_case)
# shard inputs and rng
lowercase = replicate(__snake_case)
lowercase = jax.random.split(__snake_case ,__snake_case)
lowercase = shard(__snake_case)
lowercase = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa).sum() - 0.04003906)) < 1E-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa).sum() - 2_3_7_3_5_1_6.7_5)) < 5E-1
def A__ ( self):
lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa)
lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase = jax.random.PRNGKey(0)
lowercase = 5_0
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = pipeline.prepare_inputs(__snake_case)
# shard inputs and rng
lowercase = replicate(__snake_case)
lowercase = jax.random.split(__snake_case ,__snake_case)
lowercase = shard(__snake_case)
lowercase = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa).sum() - 0.04003906)) < 1E-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa).sum() - 2_3_7_3_5_1_6.7_5)) < 5E-1
def A__ ( self):
lowercase = FlaxDDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,set_alpha_to_one=__snake_case ,steps_offset=1 ,)
lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,scheduler=__snake_case ,safety_checker=__snake_case ,)
lowercase = scheduler.create_state()
lowercase = scheduler_state
lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase = jax.random.PRNGKey(0)
lowercase = 5_0
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = pipeline.prepare_inputs(__snake_case)
# shard inputs and rng
lowercase = replicate(__snake_case)
lowercase = jax.random.split(__snake_case ,__snake_case)
lowercase = shard(__snake_case)
lowercase = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa).sum() - 0.045043945)) < 1E-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa).sum() - 2_3_4_7_6_9_3.5)) < 5E-1
def A__ ( self):
lowercase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = jax.random.split(jax.random.PRNGKey(0) ,__snake_case)
lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__snake_case ,)
lowercase = replicate(__snake_case)
lowercase = pipeline.prepare_inputs(__snake_case)
lowercase = shard(__snake_case)
lowercase = pipeline(__snake_case ,__snake_case ,__snake_case ,jit=__snake_case).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__snake_case ,use_memory_efficient_attention=__snake_case ,)
lowercase = replicate(__snake_case)
lowercase = pipeline.prepare_inputs(__snake_case)
lowercase = shard(__snake_case)
lowercase = pipeline(__snake_case ,__snake_case ,__snake_case ,jit=__snake_case).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> Optional[int]:
UpperCAmelCase : Tuple = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
UpperCAmelCase : Tuple = 1024
UpperCAmelCase : List[Any] = 4096
UpperCAmelCase : str = 24
UpperCAmelCase : List[Any] = 16
UpperCAmelCase : str = [5, 11, 17, 23]
UpperCAmelCase : List[Any] = [256, 512, 1024, 1024]
UpperCAmelCase : Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 768
UpperCAmelCase : Tuple = [1, 1, 1, 0.5]
UpperCAmelCase : int = [256, 512, 768, 768]
UpperCAmelCase : Any = 150
UpperCAmelCase : Tuple = 16
UpperCAmelCase : Any = (1, 384, 384)
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : Tuple = '''project'''
if "ade" in checkpoint_url:
UpperCAmelCase : Any = True
UpperCAmelCase : str = 768
UpperCAmelCase : Optional[int] = [1, 1, 1, 0.5]
UpperCAmelCase : List[Any] = 150
UpperCAmelCase : List[Any] = 16
UpperCAmelCase : str = '''huggingface/label-files'''
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase : Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : List[Any] = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = [1, 150, 480, 480]
return config, expected_shape
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> int:
UpperCAmelCase : List[str] = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : Tuple ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase : Tuple = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
UpperCAmelCase : int = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
UpperCAmelCase : Tuple = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
UpperCAmelCase : Any = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
UpperCAmelCase : str = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
UpperCAmelCase : Any = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
UpperCAmelCase : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase : Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase : Dict = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
UpperCAmelCase : Tuple = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
UpperCAmelCase : str = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
UpperCAmelCase : Dict = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
UpperCAmelCase : int = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
UpperCAmelCase : Tuple = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
UpperCAmelCase : int = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
UpperCAmelCase : List[str] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase : str = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase : List[str] = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
UpperCAmelCase : Any = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
UpperCAmelCase : Optional[int] = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
UpperCAmelCase : Tuple = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase : Dict = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase : int = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase : Optional[Any] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase : List[Any] = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase : Optional[int] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase : str = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase : List[str] = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase : Tuple = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase : int = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
UpperCAmelCase : Optional[int] = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
UpperCAmelCase : Dict = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
UpperCAmelCase : Any = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
UpperCAmelCase : Optional[int] = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
UpperCAmelCase : List[Any] = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
UpperCAmelCase : Optional[int] = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
UpperCAmelCase : Optional[Any] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
UpperCAmelCase : Optional[int] = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
UpperCAmelCase : List[Any] = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
UpperCAmelCase : List[str] = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase : List[Any] = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
UpperCAmelCase : List[Any] = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase : Tuple = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCAmelCase : Tuple = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Tuple = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : int = in_proj_bias[: config.hidden_size]
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def snake_case_ ( ) -> List[str]:
UpperCAmelCase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ) -> Any:
UpperCAmelCase , UpperCAmelCase : int = get_dpt_config(_lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCAmelCase : List[Any] = torch.load(_lowerCAmelCase , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(_lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase : Any = state_dict.pop(_lowerCAmelCase )
UpperCAmelCase : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
UpperCAmelCase : Optional[Any] = DPTForSemanticSegmentation(_lowerCAmelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Check outputs on an image
UpperCAmelCase : int = 480 if '''ade''' in checkpoint_url else 384
UpperCAmelCase : List[Any] = DPTImageProcessor(size=_lowerCAmelCase )
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(_lowerCAmelCase , return_tensors='''pt''' )
# forward pass
UpperCAmelCase : Any = model(**_lowerCAmelCase ).logits if '''ade''' in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth
if show_prediction:
UpperCAmelCase : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=_lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
UpperCamelCase__: Tuple = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 23 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_SCREAMING_SNAKE_CASE = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
_SCREAMING_SNAKE_CASE = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_SCREAMING_SNAKE_CASE = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels) -> float:
    return float((preds == labels).mean())
def acc_and_f1(preds, labels) -> dict:
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs) -> float:
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )
    def _compute(self, predictions, references):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__snake_case , __snake_case )}
elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__snake_case , __snake_case )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 343 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCamelCase__: Optional[int] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 23 | 0 |
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
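# Non-interactive usage sketch (graph invented for illustration; call
# ``_demo_prims()`` by hand). For a triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=4)
# the minimum spanning tree keeps the two cheapest edges.
def _demo_prims() -> None:
    graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
        graph[u].append([v, w])
        graph[v].append([u, w])
    print(prisms_algorithm(graph))  # expected: [(0, 1), (1, 2)]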
| 35 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
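# Short usage sketch (inputs invented for illustration): the range [0, 2] is
# split at mid index 1; max(3, 10) wins on the left half, -2 on the right,
# so the overall answer is 10.
if __name__ == "__main__":
    print(find_max([3, 10, -2], 0, 2))  # -> 10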
| 23 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
        'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DPTForDepthEstimation',
        'DPTForSemanticSegmentation',
        'DPTModel',
        'DPTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
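# What the lazy module buys us (illustrative sketch, not executed here):
# importing this package stays cheap, and heavyweight attributes only trigger
# the real submodule import on first access, e.g.:
#
#   from transformers.models.dpt import DPTConfig  # no torch needed yet
#   from transformers.models.dpt import DPTModel   # now modeling_dpt is loaded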
| 24 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
        pass
| 24 | 1 |
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
snake_case_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
snake_case_ = [0, 0]
# all coordinates are given in format [y,x]
snake_case_ = [len(grid) - 1, len(grid[0]) - 1]
snake_case_ = 1
# the cost map which pushes the path closer to the goal
snake_case_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
snake_case_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
snake_case_ = 99
snake_case_ , snake_case_ = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
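    # Small self-check added for illustration (uses the demo grid above): on a
    # valid path, consecutive waypoints are 4-connected, i.e. exactly one step
    # apart in Manhattan distance.
    assert all(
        abs(a[0] - b[0]) + abs(a[1] - b[1]) == 1 for a, b in zip(path, path[1:])
    ), "path should move one cell at a time"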
| 24 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
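# For the inputs above, the only subsets of [3, 34, 4, 12, 5, 2] summing to 9
# are [3, 4, 2] and [4, 5], so this prints: [3, 4, 2] [4, 5]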
| 24 | 1 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
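# Illustrative input/output (file name and numbers invented): given a results
# file {"benchmarks/b.json": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}},
# the emitted markdown contains, inside a collapsible <details> block:
#
#   ### Benchmark: b.json
#   | metric | time |
#   |--------|---|
#   | new / old (diff) | 1.500000 / 2.000000 (-0.500000) |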
| 24 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 24 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 24 |
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
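# Worked check: the classic triplet is a=200, b=375, c=425, since
# 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2,
# so solution() returns 200 * 375 * 425 = 31875000.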
| 24 | 1 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
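# Note: ``sudoku`` uses the walrus operator, so this module needs Python 3.8+.
# A quick non-interactive sketch (copies avoid mutating the module-level grids):
#
#   assert sudoku([row[:] for row in initial_grid]) is not None
#   assert sudoku([row[:] for row in no_solution]) is None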
| 24 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
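        # Why [4, 5, 6, 3, 3, 7, 8, 3]: the expected ids above imply that 0-3
        # are reserved for the special tokens, the five pieces written in setUp
        # map to 4-8, and the unknown pieces "▁l" and "à" (plus the explicit
        # "<unk>") all fall back to id 3.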
| 24 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def prepare_img ( ) -> "Image.Image":
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
| 24 |
def lowerCamelCase__ ( snake_case_ : int ) -> int:
    if not isinstance(snake_case_ , int ) or snake_case_ < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    number = snake_case_
    count = 0
    while number:
        # Clearing the lowest set bit jumps straight to the next 1, so the loop
        # runs once per set bit instead of once per bit position (Kernighan's trick).
        number &= number - 1
        count += 1
    return count
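# Sanity check (a sketch): 13 is 0b1101, so three iterations clear its three set bits.
assert lowerCamelCase__(13) == 3
assert lowerCamelCase__(0) == 0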
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
def lowerCamelCase__ ( snake_case_ : int = 200 ) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (snake_case_ + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , snake_case_ + 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[snake_case_]
if __name__ == "__main__":
    assert lowerCamelCase__(200) == 73682
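# Why the coin loop is outermost (a sketch): with coins [1, 2] and a target of 3 pence
# the table counts 1+1+1 and 1+2, i.e. 2 combinations; swapping the two loops would also
# count 2+1 and report permutations instead of combinations.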
| 24 |
from math import log2
def lowerCamelCase__ ( snake_case_ : int ) -> int:
    if snake_case_ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(snake_case_ , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (snake_case_ == 0) else int(log2(snake_case_ & -snake_case_ ) )
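# Example (a sketch): 12 is 0b1100; 12 & -12 isolates the lowest set bit (0b100 == 4),
# and log2(4) == 2 is that bit's zero-based position.
assert lowerCamelCase__(12) == 2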
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 1 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class ( config_class : Optional[Any] ) -> str:
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
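# Illustration (a sketch) of what the regex extracts from a config docstring fragment:
assert _re_checkpoint.findall(
    "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]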
def check_config_docstrings_have_checkpoints ( ) -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 24 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Optional[int] , *a__ : Any , **a__ : Dict ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
requires_backends(self , '''vision''' )
self.check_model_type(a__ )
def __call__(self : Optional[int] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : Tuple ):
"""simple docstring"""
return super().__call__(a__ , **a__ )
    def _sanitize_parameters (self : Dict , **a__ : Any ):
        """simple docstring"""
        return {}, {}, {}
    def preprocess (self : List[str] , a__ : Any ):
        """simple docstring"""
        image = load_image(a__ )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward (self : int , a__ : List[Any] ):
        """simple docstring"""
        model_outputs = self.model(**a__ )
        return model_outputs
    def postprocess (self : int , a__ : Any ):
        """simple docstring"""
        predicted_depth = a__.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
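# Minimal usage sketch (the checkpoint name is an assumption; any depth-estimation
# model works):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the tensor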
| 24 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def lowerCamelCase__ ( snake_case_ : Tuple ) -> str:
print('''Loading config file...''' )
def flatten_yaml_as_dict(snake_case_ : Optional[int] , snake_case_ : Optional[int]="" , snake_case_ : Tuple="." ):
__snake_case = []
for k, v in d.items():
__snake_case = parent_key + sep + k if parent_key else k
if isinstance(snake_case_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case_ , snake_case_ , sep=snake_case_ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case_ )
__snake_case = argparse.Namespace()
with open(snake_case_ , '''r''' ) as yaml_file:
try:
__snake_case = yaml.load(snake_case_ , Loader=yaml.FullLoader )
__snake_case = flatten_yaml_as_dict(snake_case_ )
for k, v in flat_cfg.items():
setattr(snake_case_ , snake_case_ , snake_case_ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(snake_case_ , str(snake_case_ ) ) )
return config
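# Example of the flattening above (a sketch): the nested mapping
# {"model": {"classification": {"name": "mobilevit_v2"}}} becomes the flat attribute
# "model.classification.name", which the getattr calls further below rely on.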
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Tuple ) -> Dict:
__snake_case = MobileViTVaConfig()
__snake_case = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
__snake_case = 1000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
__snake_case = 384
else:
__snake_case = 256
__snake_case = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
__snake_case = 2_1000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
__snake_case = 384
else:
__snake_case = 256
__snake_case = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
__snake_case = 151
__snake_case = 512
__snake_case = '''ade20k-id2label.json'''
__snake_case = True
elif task_name.startswith('''voc_''' ):
__snake_case = 21
__snake_case = 512
__snake_case = '''pascal-voc-id2label.json'''
__snake_case = True
# orig_config
__snake_case = load_orig_config_file(snake_case_ )
assert getattr(snake_case_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
__snake_case = getattr(snake_case_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(snake_case_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
__snake_case = getattr(snake_case_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
__snake_case = getattr(snake_case_ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
__snake_case = getattr(snake_case_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
__snake_case = getattr(snake_case_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
__snake_case = getattr(snake_case_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
__snake_case = '''huggingface/label-files'''
__snake_case = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
__snake_case = {int(snake_case_ ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
return config
def rename_key ( dct : dict , old : str , new : str ) -> None:
    val = dct.pop(old )
    dct[new] = val
def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : Tuple=False ) -> int:
if base_model:
__snake_case = ''''''
else:
__snake_case = '''mobilevitv2.'''
__snake_case = []
for k in state_dict.keys():
if k[:8] == "encoder.":
__snake_case = k[8:]
else:
__snake_case = k
if ".block." in k:
__snake_case = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
__snake_case = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
__snake_case = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
__snake_case = k_new.replace('''conv_1.''' , f"""{model_prefix}conv_stem.""" )
for i in [1, 2]:
if f"""layer_{i}.""" in k:
__snake_case = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
if ".exp_1x1." in k:
__snake_case = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
__snake_case = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"""layer_{i}.0.""" in k:
__snake_case = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
if f"""layer_{i}.1.local_rep.0.""" in k:
__snake_case = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
if f"""layer_{i}.1.local_rep.1.""" in k:
__snake_case = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
for i in [3, 4, 5]:
if i == 3:
__snake_case = [0, 1]
elif i == 4:
__snake_case = [0, 1, 2, 3]
elif i == 5:
__snake_case = [0, 1, 2]
for j in j_in:
if f"""layer_{i}.1.global_rep.{j}.""" in k:
__snake_case = k_new.replace(
f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
__snake_case = k_new.replace(
f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
if f"""layer_{i}.1.conv_proj.""" in k:
__snake_case = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
if "pre_norm_attn.0." in k:
__snake_case = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
__snake_case = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
__snake_case = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
__snake_case = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
__snake_case = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
__snake_case = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
__snake_case = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
__snake_case = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
__snake_case = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
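# Example of the remapping above (a sketch): with the "mobilevitv2." prefix, the original
# key "layer_3.1.local_rep.0.conv.weight" is rewritten to
# "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight".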
def lowerCamelCase__ ( snake_case_ : Tuple ) -> List[str]:
__snake_case = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(snake_case_ )
for k in keys_to_ignore:
state_dict.pop(snake_case_ , snake_case_ )
def prepare_img ( ) -> "Image.Image":
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int ) -> int:
__snake_case = get_mobilevitva_config(snake_case_ , snake_case_ )
# load original state_dict
__snake_case = torch.load(snake_case_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
__snake_case = MobileViTVaForSemanticSegmentation(snake_case_ ).eval()
__snake_case = False
else:
__snake_case = MobileViTVaForImageClassification(snake_case_ ).eval()
__snake_case = False
# remove and rename some keys of load the original model
__snake_case = checkpoint
remove_unused_keys(snake_case_ )
__snake_case = create_rename_keys(snake_case_ , base_model=snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
# load modified state_dict
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__snake_case = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' )
__snake_case = model(**snake_case_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
__snake_case = outputs.logits
__snake_case = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
__snake_case = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , snake_case_ , atol=1e-4 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task on which the MobileViTV2 model you\'d like to convert was trained. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
snake_case_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 24 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__snake_case = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
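# In short (a sketch): patch_submodule wraps every attribute along the dotted path in a
# _PatchedModuleObj, so each alias of the target (os.path.join, path.join, plain join)
# resolves to the mock, while sibling attributes such as os.rename keep their originals.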
def lowerCamelCase__ ( ) -> Any:
assert _test_patching.open is open
__snake_case = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , snake_case_ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
# pandas.read_csv is not present in _test_patching
__snake_case = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ):
pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
    # builtins should always be mocked even if they're not in the globals
# in case they're loaded at one point
__snake_case = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , snake_case_ ) is None
with patch_submodule(_test_patching , '''len''' , snake_case_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Union[str, Any]:
__snake_case = '''__test_patch_submodule_start_and_stop_mock__'''
__snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__snake_case = '''__test_patch_submodule_successive_join__'''
__snake_case = '''__test_patch_submodule_successive_dirname__'''
__snake_case = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Tuple:
__snake_case = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
| 24 | 1 |
from __future__ import annotations
def make_matrix ( row_size : int = 4 ) -> list[list[int]]:
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90 ( matrix : list[list[int]] ) -> list[list[int]]:
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180 ( matrix : list[list[int]] ) -> list[list[int]]:
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270 ( matrix : list[list[int]] ) -> list[list[int]]:
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose ( matrix : list[list[int]] ) -> list[list[int]]:
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row ( matrix : list[list[int]] ) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column ( matrix : list[list[int]] ) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix ( matrix : list[list[int]] ) -> None:
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
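# Worked example (a sketch): rotating [[1, 2], [3, 4]] by 90 degrees counterclockwise
# yields [[2, 4], [1, 3]]; transpose gives [[1, 3], [2, 4]] and reverse_row flips the rows.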
| 24 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
A_ : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
A_ : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
A_ : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics ( split : str , metrics : dict , output_dir : str ) -> None:
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
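# Example (a sketch): handle_metrics("val", {"val_loss": 0.1234}, "output/") logs the
# metric and writes it to output/val_results.json via save_json.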
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case = SeqaSeqDataset
# Get datasets
__snake_case = (
dataset_class(
snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__snake_case = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
__snake_case = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
__snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__snake_case = train_result.metrics
__snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate(metric_key_prefix='''val''' )
__snake_case = data_args.n_val
__snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' )
__snake_case = test_output.metrics
__snake_case = data_args.n_test
if trainer.is_world_process_zero():
__snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
__snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__snake_case = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 24 | 1 |
from __future__ import annotations
from random import random
class Node:
    def __init__(self , value : int | None = None ):
        """simple docstring"""
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__(self ):
        """simple docstring"""
        from pprint import pformat
        if self.left is None and self.right is None:
            return f"""'{self.value}: {self.prior:.5}'"""
        else:
            return pformat(
                {f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__(self ):
        """simple docstring"""
        value = str(self.value ) + ''' '''
        left = str(self.left or '''''' )
        right = str(self.right or '''''' )
        return value + left + right
def split ( root : Node | None , value : int ) -> tuple[Node | None, Node | None]:
    if root is None: # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge ( left : Node | None , right : Node | None ) -> Node | None:
    if (not left) or (not right): # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert ( root : Node | None , value : int ) -> Node | None:
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase ( root : Node | None , value : int ) -> Node | None:
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def inorder ( root : Node | None ) -> None:
    if not root: # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )
def interact_treap ( root : Node | None , args : str ) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def main ( ) -> None:
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('''good bye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
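# Interaction sketch: entering "+1 +3 +5 +17 +19 +2 +16 +4 +0" builds a treap whose
# in-order traversal prints the keys in sorted order; "-3" then removes every node
# holding the value 3.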
| 24 |
from math import pi
def arc_length ( angle : int , radius : int ) -> float:
    return 2 * pi * radius * (angle / 360)
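# Sanity check (a sketch): a 90-degree arc is a quarter of the circumference, so with
# radius 10 the length equals 2 * pi * 10 / 4 == 5 * pi.
assert abs(arc_length(90 , 10 ) - 5 * pi ) < 1E-9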
if __name__ == "__main__":
print(arc_length(90, 10))
| 24 | 1 |
def lowerCamelCase__ ( snake_case_ : int = 100 ) -> int:
    sum_of_squares = snake_case_ * (snake_case_ + 1) * (2 * snake_case_ + 1) / 6
    square_of_sum = (snake_case_ * (snake_case_ + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
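# Worked example (a sketch): for n = 3, (1 + 2 + 3) ** 2 == 36 and 1 + 4 + 9 == 14,
# so the difference is 22.
assert lowerCamelCase__(3) == 22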
if __name__ == "__main__":
    print(F'{lowerCamelCase__() = }')
| 24 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[Any] = 'vit_msn'
    def __init__(self , hidden_size : int=768 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.0 , attention_probs_dropout_prob : float=0.0 , initializer_range : float=0.02 , layer_norm_eps : float=1E-06 , image_size : int=224 , patch_size : int=16 , num_channels : int=3 , qkv_bias : bool=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
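# Usage sketch: the defaults above mirror the base ViT-MSN layout (hidden_size=768,
# 12 layers, 12 heads, 224x224 inputs split into 16x16 patches); a hypothetical
# larger-input variant would be SCREAMING_SNAKE_CASE__(image_size=384).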
| 24 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
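    # Note (a sketch): with _LazyModule the torch-backed modeling file is imported only
    # when one of the listed symbols (e.g. SwiftFormerModel) is first accessed, keeping
    # the initial package import cheap.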
| 24 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def a (self : int , a__ : List[Any]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
__snake_case = np.random.RandomState(a__ )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
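    # Note (a sketch): strength=0.75 keeps only the last 75% of the noise schedule, so with
    # num_inference_steps=3 roughly two denoising steps actually run on the encoded image.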
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Any ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def a (self : List[str] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def a (self : Dict ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 24 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__(self : List[Any] , a__ : Optional[int] , a__ : Optional[int]=13 , a__ : List[Any]=7 , a__ : Dict=True , a__ : Optional[Any]=True , a__ : List[Any]=True , a__ : Optional[Any]=True , a__ : Optional[int]=99 , a__ : Optional[Any]=32 , a__ : List[str]=5 , a__ : Any=4 , a__ : str=37 , a__ : Optional[int]="gelu" , a__ : Optional[Any]=0.1 , a__ : Dict=0.1 , a__ : Any=512 , a__ : Union[str, Any]=16 , a__ : Any=2 , a__ : Optional[int]=0.0_2 , a__ : Optional[int]=4 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = True
__snake_case = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Any = True
A_ : Optional[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a (self : Dict ):
"""simple docstring"""
__snake_case = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a (self : List[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=a__ )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(a__ )
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def a (self : str ):
"""simple docstring"""
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def a (self : Any ):
"""simple docstring"""
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
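# --- Usage sketch (an illustration added here, not part of the test suite above):
# the same checkpoint the integration tests use, run standalone for fill-mask logits.
#
#   import numpy as np
#   from transformers import FlaxRobertaPreLayerNormForMaskedLM
#
#   model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained(
#       "andreasmadsen/efficient_mlm_m0.40", from_pt=True
#   )
#   logits = model(np.ones((1, 11), dtype="i4")).logits  # shape (1, 11, 50265)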
| 24 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]
    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        """simple docstring"""
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)
def __len__(self : int ):
"""simple docstring"""
return len(self.features )
    def __getitem__(self, i):
"""simple docstring"""
return self.features[i]
    def get_labels(self):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]
    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        """simple docstring"""
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )
    def get_dataset(self):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
    def __getitem__(self, i):
"""simple docstring"""
return self.features[i]
    def get_labels(self):
"""simple docstring"""
return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """simple docstring"""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """simple docstring"""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
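# Usage sketch (an illustration added here, not upstream code): building the torch
# dataset above from a local HANS checkout. "hans_data" is a hypothetical directory
# containing heuristics_train_set.txt / heuristics_evaluation_set.txt.
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = HansDataset("hans_data", tokenizer, task="hans", max_seq_length=128)
#   print(len(train_dataset), train_dataset.get_labels())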
| 24 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
@property
def a (self : Any ):
"""simple docstring"""
return (3, 32, 32)
@property
def a (self : Optional[Any] ):
"""simple docstring"""
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : int ):
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def a (self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**a__ )
model.to(a__ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**a__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__snake_case = torch.randn_like(a__ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**a__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(a__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**a__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def a (self : Tuple ):
"""simple docstring"""
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
__snake_case = model.to(a__ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=a__ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(a__ )
with torch.no_grad():
__snake_case = model(a__ , sample_posterior=a__ , generator=a__ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026])
else:
__snake_case = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485])
self.assertTrue(torch_all_close(a__ , a__ , rtol=1E-2 ) )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def get_file_format(self, seed, shape):
        """simple docstring"""
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
        """simple docstring"""
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
        """simple docstring"""
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        """simple docstring"""
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def a (self : Optional[int] , a__ : int , a__ : Union[str, Any] , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(a__ )
__snake_case = self.get_generator(a__ )
with torch.no_grad():
__snake_case = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(a__ , a__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def a (self : Union[str, Any] , a__ : Union[str, Any] , a__ : str ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model(fpaa=a__ )
__snake_case = self.get_sd_image(a__ , fpaa=a__ )
__snake_case = self.get_generator(a__ )
with torch.no_grad():
__snake_case = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def a (self : Optional[Any] , a__ : str , a__ : Tuple , a__ : List[Any] ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(a__ )
with torch.no_grad():
__snake_case = model(a__ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(a__ , a__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def a (self : str , a__ : Optional[int] , a__ : Any ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def a (self : List[Any] , a__ : Any , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model(fpaa=a__ )
__snake_case = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
__snake_case = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def a (self : Tuple , a__ : Any ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model(fpaa=a__ )
__snake_case = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
__snake_case = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a__ , a__ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def a (self : int , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a__ , a__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def a (self : str , a__ : Optional[Any] , a__ : List[str] ):
"""simple docstring"""
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(a__ )
__snake_case = self.get_generator(a__ )
with torch.no_grad():
__snake_case = model.encode(a__ ).latent_dist
__snake_case = dist.sample(generator=a__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(a__ )
__snake_case = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(a__ , a__ , atol=a__ )
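# --- Usage sketch mirroring the integration tests above (an illustration added
# here): one encode/decode round trip through the same pretrained VAE. Assumes the
# "CompVis/stable-diffusion-v1-4" weights are reachable; runs in fp32 on CPU.
def _vae_roundtrip_demo():
    vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
    vae.eval()
    image = torch.randn(1, 3, 512, 512)  # stand-in for a normalized RGB image
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample()  # -> (1, 4, 64, 64)
        reconstruction = vae.decode(latents).sample       # -> (1, 3, 512, 512)
    return reconstruction.shape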
| 24 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
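# --- Usage sketch (an illustration; the class above matches transformers'
# AltCLIP processor layout, and "BAAI/AltCLIP" is a published checkpoint with
# this configuration):
#
#   from PIL import Image
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                      return_tensors="pt")
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']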
| 24 | 1 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
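# --- Worked example (a sketch added for illustration): a 3x3 matrix with a known
# Doolittle factorization; L is unit lower triangular and L @ U reproduces the input.
def _lu_demo() -> None:
    table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(table)
    # lower = [[1, 0, 0], [0, 1, 0], [2.5, 8, 1]]
    # upper = [[2, -2, 1], [0, 1, 2], [0, 0, -17.5]]
    assert np.allclose(lower @ upper, table)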
| 24 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_logger()
# the current default level is logging.WARNING
__snake_case = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_verbosity()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(a__ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def a (self : Dict ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def a (self : List[Any] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def a (self : Any ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
def lowerCamelCase__ ( ) -> str:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
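# --- Usage sketch (an illustration added here): the same knobs these tests
# exercise, applied in application code rather than via env vars.
def _quiet_transformers_demo() -> None:
    from transformers.utils import logging as hf_logging

    hf_logging.set_verbosity_error()   # same effect as TRANSFORMERS_VERBOSITY=error
    hf_logging.disable_progress_bar()  # counterpart of enable_progress_bar() above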
| 24 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table):
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
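# --- Usage sketch (an illustration added here): this builder is what backs
# `load_dataset("parquet", ...)`. A minimal round trip through a local file:
def _parquet_roundtrip_demo(path: str = "demo.parquet"):
    pq.write_table(pa.table({"text": ["a", "b"], "label": [0, 1]}), path)
    ds = datasets.load_dataset("parquet", data_files={"train": path}, split="train")
    return ds.features  # inferred from the Arrow schema, as in _split_generators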
| 24 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[str] = CpmAntTokenizer
A_ : Optional[int] = False
    def setUp(self):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def a (self : Dict ):
"""simple docstring"""
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        out_string = tokenizer.decode(input_ids)
        self.assertEqual(out_string, normalized_text)
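# --- Usage sketch (mirrors the @tooslow test above; requires jieba and network access):
#
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   tokenizer.tokenize("今天天气真好!")  # -> ['今天', '天气', '真', '好', '!']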
| 24 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        """simple docstring"""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , a__ ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
if origin_type is Union or (hasattr(a__ , '''UnionType''' ) and isinstance(a__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(a__ ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(a__ ) not in field.type.__args__:
# filter `str` in Union
__snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__snake_case = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__snake_case = (
field.type.__args__[0] if isinstance(a__ , field.type.__args__[1] ) else field.type.__args__[1]
)
__snake_case = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
if origin_type is Literal or (isinstance(field.type , a__ ) and issubclass(field.type , a__ )):
if origin_type is Literal:
__snake_case = field.type.__args__
else:
__snake_case = [x.value for x in field.type]
__snake_case = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
__snake_case = field.default
else:
__snake_case = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
# Hack because type=bool in argparse does not behave as we want.
__snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
__snake_case = '''?'''
# This is the value that will get picked if we do --field_name (without value)
__snake_case = True
elif isclass(a__ ) and issubclass(a__ , a__ ):
__snake_case = field.type.__args__[0]
__snake_case = '''+'''
if field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
__snake_case = True
else:
__snake_case = field.type
if field.default is not dataclasses.MISSING:
__snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
else:
__snake_case = True
parser.add_argument(a__ , *a__ , **a__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        """simple docstring"""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(a__ ):
__snake_case = '''.'''.join(map(a__ , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__snake_case = []
if args_filename:
args_files.append(Path(a__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__snake_case = ArgumentParser()
args_file_parser.add_argument(a__ , type=a__ , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__snake_case , __snake_case = args_file_parser.parse_known_args(args=a__ )
__snake_case = vars(a__ ).get(args_file_flag.lstrip('''-''' ) , a__ )
if cmd_args_file_paths:
args_files.extend([Path(a__ ) for p in cmd_args_file_paths] )
__snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
__snake_case , __snake_case = self.parse_known_args(args=a__ )
__snake_case = []
for dtype in self.dataclass_types:
__snake_case = {f.name for f in dataclasses.fields(a__ ) if f.init}
__snake_case = {k: v for k, v in vars(a__ ).items() if k in keys}
for k in keys:
delattr(a__ , a__ )
__snake_case = dtype(**a__ )
outputs.append(a__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(a__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        """simple docstring"""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        """simple docstring"""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
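# --- Usage sketch (an illustration added here, not upstream code): a dataclass
# becomes a CLI. Because `do_train` defaults to True, a `--no_do_train` complement
# flag is generated by _parse_dataclass_field above.
def _hf_argparser_demo():
    @dataclasses.dataclass
    class DemoArguments:
        model_name: str = "bert-base-uncased"
        do_train: bool = True

    parser = HfArgumentParser(DemoArguments)
    (demo_args,) = parser.parse_args_into_dataclasses(
        args=["--model_name", "gpt2", "--no_do_train"]
    )
    return demo_args  # DemoArguments(model_name='gpt2', do_train=False)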
| 24 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        """simple docstring"""
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def a (self : List[Any] , a__ : int , a__ : int ):
"""simple docstring"""
__snake_case = TimmBackbone(config=a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(a__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
A_ : Optional[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
A_ : List[Any] = False
A_ : Dict = False
A_ : Any = False
A_ : List[Any] = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
def a (self : Any ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : int ):
"""simple docstring"""
        checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case = self.all_model_classes[0]
__snake_case = model_class(a__ )
model.to(a__ )
__snake_case = self._prepare_for_class(a__ , a__ )
__snake_case = model(**a__ )
__snake_case = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case = copy.deepcopy(a__ )
__snake_case = None
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case = copy.deepcopy(a__ )
__snake_case = False
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
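# --- Usage sketch mirroring the parity test above (an illustration; requires `timm`):
# load a timm backbone through AutoBackbone and inspect its multi-scale feature maps.
def _timm_backbone_demo():
    backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
    outputs = backbone(torch.randn(1, 3, 224, 224))
    return [fmap.shape for fmap in outputs.feature_maps], backbone.channels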
| 24 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
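# --- Worked trace for main() above (annotation only): leaves
# [90, 23, 6, 33, 21, 65, 123, 34423] with height = 3, maximizer moving first.
#   depth 2 (max): max(90,23)=90   max(6,33)=33   max(21,65)=65   max(123,34423)=34423
#   depth 1 (min): min(90,33)=33                  min(65,34423)=65
#   depth 0 (max): max(33,65)=65   -> prints "Optimal value : 65"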
| 24 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'

IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'

DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n\n    return bar()\n'

TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'

TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'

MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'

EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'

GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'

MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'

MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'

CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 24 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
snake_case_ = None
logger = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
snake_case_ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
snake_case_ = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
snake_case_ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        """simple docstring"""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def a (self : List[Any] , a__ : Optional[Any] , a__ : str , a__ : Optional[str] , a__ : Optional[str] , **a__ : int ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case = src_lang
__snake_case = self(a__ , add_special_tokens=a__ , return_tensors=a__ , **a__ )
__snake_case = self.convert_tokens_to_ids(a__ )
__snake_case = tgt_lang_id
return inputs
def a (self : Tuple , a__ : List[str] , a__ : str = "en_XX" , a__ : Optional[List[str]] = None , a__ : str = "ro_RO" , **a__ : Tuple , ):
"""simple docstring"""
__snake_case = src_lang
__snake_case = tgt_lang
return super().prepare_seqaseq_batch(a__ , a__ , **a__ )
def a (self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a (self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a (self : Optional[int] , a__ : str ):
"""simple docstring"""
__snake_case = self.convert_tokens_to_ids(a__ )
__snake_case = []
__snake_case = [self.eos_token_id, self.cur_lang_code]
__snake_case = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a (self : Union[str, Any] , a__ : str ):
"""simple docstring"""
__snake_case = self.convert_tokens_to_ids(a__ )
__snake_case = []
__snake_case = [self.eos_token_id, self.cur_lang_code]
__snake_case = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a (self : int , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
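

# A minimal usage sketch (an illustrative addition, not part of the original file;
# it downloads the public facebook/mbart-large-en-ro checkpoint from the Hub):
#
#     from transformers import MBartTokenizerFast
#
#     tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
#     batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
#     # input_ids end with </s> followed by the en_XX language code, as arranged by
#     # set_src_lang_special_tokens above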
| 24 |
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP client socket
    host = socket.gethostname()  # the server is assumed to run on the same machine
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
if __name__ == "__main__":
main()
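

# For local testing, a minimal counterpart server sketch (not part of the original
# script; the served file name `send_file.txt` is an assumption):
def serve_file() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))  # same host/port the client dials
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open('send_file.txt', 'rb') as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()  # closing the connection signals EOF to the client's recv loop
    server.close()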
| 24 | 1 |
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.'
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.'
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.'
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
            'We move the model to cuda.'
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} '
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == 'balanced_low_0'),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit'
                    )
        del device_map_without_some_modules
    return device_map
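

# For reference, a hand-written `device_map` of the kind this helper validates could
# look like the following (module names are purely illustrative, not from a real model):
#
#     custom_device_map = {
#         "transformer.wte": 0,      # first GPU
#         "transformer.h.0": 0,
#         "transformer.h.1": "cpu",  # offloading is only allowed for 8-bit, see above
#         "lm_head": "disk",
#     }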
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.'
        )
    return model
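

# A minimal sketch of the replacement in action (assumes `bitsandbytes` and a CUDA
# GPU are available; the two-layer model below is made up for illustration):
#
#     import torch.nn as nn
#     from accelerate.utils import BnbQuantizationConfig
#
#     toy = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 4))
#     cfg = BnbQuantizationConfig(load_in_8bit=True)
#     toy = replace_with_bnb_layers(toy, cfg)
#     # every nn.Linear is now a bnb.nn.Linear8bitLt carrying the original weights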
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f'{module} has no attribute {split}.')
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace('weight', 'SCB'),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
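

# End-to-end usage sketch for this module (assumptions: a CUDA GPU, `bitsandbytes`
# installed, and a local folder `./model-fp16` holding the model weights):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     with init_empty_weights():
#         empty_model = MyModel()  # hypothetical model class
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     model = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location="./model-fp16",
#         device_map="auto",
#     )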
| 24 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
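
    # Quick demo (the printed list is one valid answer; ties between equally long
    # non-decreasing subsequences are broken by the recursion order):
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))
    # expected: a longest non-decreasing subsequence such as [10, 22, 33, 41, 60, 80]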
| 24 | 1 |
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]

__version__ = '3.0.12'


_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()

                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...'
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn('only soft file lock is available')
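

# Usage sketch (an illustrative addition; the lock path is made up):
#
#     lock = FileLock("/tmp/my_resource.lock", timeout=5)
#     try:
#         with lock:  # blocks for up to 5 seconds, then raises Timeout
#             pass    # exclusive access to the shared resource goes here
#     except Timeout:
#         print("another process is holding the lock")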
| 24 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
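
# Standalone inference sketch mirroring the integration test above (downloads the
# public SCUT-DLVCLab checkpoint; the toy tensors are the same as in the test):
#
#     model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#     input_ids = torch.tensor([[1, 2]])
#     bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
#     with torch.no_grad():
#         outputs = model(input_ids=input_ids, bbox=bbox)
#     print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])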
| 24 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def a (self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a__ ) )
def a (self : Dict , **a__ : Optional[int] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def a (self : Any , **a__ : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def a (self : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = '''lower newer'''
__snake_case = '''lower newer'''
return input_text, output_text
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case = '''lower newer'''
__snake_case = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__snake_case = tokenizer.tokenize(a__ ) # , add_prefix_space=True)
self.assertListEqual(a__ , a__ )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=a__ ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=a__ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def a (self : Any ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__snake_case = tokenizer.encode('''sequence builders''' , add_special_tokens=a__ )
__snake_case = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a__ )
__snake_case = tokenizer.encode(
'''sequence builders''' , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.build_inputs_with_special_tokens(a__ )
__snake_case = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
__snake_case = '''Encode this sequence.'''
__snake_case = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a__ , a__ )
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a__ , a__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__snake_case = tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a__ , a__ )
# Testing spaces after special tokens
__snake_case = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(a__ , lstrip=a__ , rstrip=a__ )} ) # mask token has a left space
__snake_case = tokenizer.convert_tokens_to_ids(a__ )
__snake_case = '''Encode <mask> sequence'''
__snake_case = '''Encode <mask>sequence'''
__snake_case = tokenizer.encode(a__ )
__snake_case = encoded.index(a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a__ , a__ )
__snake_case = tokenizer.encode(a__ )
__snake_case = encoded.index(a__ )
__snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a__ , a__ )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : str ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case = self.tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case = '''A, <mask> AllenNLP sentence.'''
__snake_case = tokenizer_r.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
__snake_case = tokenizer_p.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
a__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
a__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def a (self : str ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__snake_case = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , a__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__snake_case = f"""{text_of_1_token} {text_of_1_token}"""
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , )
__snake_case = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ) + 1, 1 + len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , )
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
__snake_case = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , )
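
# Quick illustration of the offset-mapping behaviour exercised above (a sketch; it
# fetches the allenai/longformer-base-4096 tokenizer from the Hub):
#
#     tok = LongformerTokenizerFast.from_pretrained(
#         "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
#     )
#     enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#     print(enc.offset_mapping)  # [(0, 5), (6, 11)]; offsets exclude the inserted space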
| 24 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
snake_case_ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
# We can optionnally pass directly the words and bounding boxes
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
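
# Minimal pipeline usage sketch matching the slow tests above (needs network access,
# `pytesseract`, and the public impira/layoutlm-document-qa checkpoint):
#
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     preds = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
#     print(preds[0]["answer"])  # "us-001" in the assertions above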
| 24 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
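# Hedged usage sketch for the converter above (the script filename and the
# output directory below are placeholders, not taken from the source):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted \
#       --push_to_hub
#
# When --push_to_hub is set, the weights and the BitImageProcessor are
# uploaded under the model name once the logits check passes.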
| 24 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Depth-first search that backtracks as soon as the running sum
    # overshoots the target or the remaining numbers cannot reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
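# A quick hedged self-check of the backtracking above: for the inputs used
# in this module, the DFS emits the two qualifying subsets in index order.
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]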
| 24 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 24 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
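        # Hedged usage sketch of the backbone API exercised by these tests
        # (the checkpoint is the public one; the out_features choice and the
        # pixel_values tensor are illustrative):
        #
        #   from transformers import ConvNextBackbone
        #   backbone = ConvNextBackbone.from_pretrained(
        #       "facebook/convnext-tiny-224", out_features=["stage2", "stage4"]
        #   )
        #   feature_maps = backbone(pixel_values).feature_maps  # pixel_values: (1, 3, 224, 224) float tensor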
| 24 | 1 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
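# Hedged spot-checks of the implementation above (both dates are known
# Saturdays):
if __name__ == "__main__":
    assert get_week_day(2020, 10, 24) == "Saturday"
    assert get_week_day(2000, 1, 1) == "Saturday"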
| 24 |
def solution() -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet with a + b + c == 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
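# Hedged cross-check: the unique triplet is (200, 375, 425), since
# 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000.
if __name__ == "__main__":
    assert solution() == 200 * 375 * 425  # 31875000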
| 24 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Tuple , *a__ : Union[str, Any] , a__ : Tuple=None , a__ : Optional[int]=None , **a__ : int ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
__snake_case = eval_examples
__snake_case = post_process_function
def a (self : int , a__ : int=None , a__ : Union[str, Any]=None , a__ : Optional[Any]=None , a__ : str = "eval" ):
"""simple docstring"""
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(a__ )
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case = time.time()
try:
__snake_case = eval_loop(
a__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(a__ , a__ , output.predictions )
__snake_case = self.compute_metrics(a__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__snake_case = metrics.pop(a__ )
metrics.update(output.metrics )
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(a__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , a__ )
return metrics
def a (self : str , a__ : List[Any] , a__ : int , a__ : List[str]=None , a__ : str = "test" ):
"""simple docstring"""
__snake_case = self.get_test_dataloader(a__ )
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case = time.time()
try:
__snake_case = eval_loop(
a__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(a__ , a__ , output.predictions , '''predict''' )
__snake_case = self.compute_metrics(a__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__snake_case = metrics.pop(a__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a__ )
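# Hedged wiring sketch for the trainer subclass above (the class name was
# mangled in this dump; `QuestionAnsweringTrainer` and the helper names are
# illustrative stand-ins, not confirmed by the source):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_features,
#       eval_examples=raw_eval_examples,
#       post_process_function=postprocess_qa_predictions,
#       compute_metrics=compute_squad_metrics,
#   )
#   metrics = trainer.evaluate(metric_key_prefix="eval")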
| 24 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 24 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
@require_cuda
def a (self : Tuple ):
"""simple docstring"""
__snake_case = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a__ ):
__snake_case = Accelerator(cpu=a__ )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case = GradientState()
assert state.num_steps == 1
__snake_case = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case = False
assert state.sync_gradients is False
GradientState._reset_state()
def a (self : List[str] ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def a (self : Dict ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def a (self : str ):
"""simple docstring"""
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a__ : List[Any] , **a__ : List[Any] ):
pass
with patch('''torch.cuda.set_device''' , a__ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
__snake_case = get_signature(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 )
def a (self : int ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
__snake_case = get_signature(a__ )
# saving hook
def save_config(a__ : Any , a__ : Any , a__ : Optional[int] ):
__snake_case = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a__ , '''data.json''' ) , '''w''' ) as f:
json.dump(a__ , a__ )
# loading hook
def load_config(a__ : str , a__ : Optional[Any] ):
with open(os.path.join(a__ , '''data.json''' ) , '''r''' ) as f:
__snake_case = json.load(a__ )
__snake_case = config['''class_name''']
__snake_case = accelerator.register_save_state_pre_hook(a__ )
__snake_case = accelerator.register_load_state_pre_hook(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks removed
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertTrue(dummy_obj is None )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertEqual(
getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a__ , '''_is_accelerate_prepared''' , a__ ) , a__ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def a (self : Optional[Any] ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
__snake_case = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a__ , device_map={'''''': 0} , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(a__ )
@slow
@require_bnb
def a (self : Any ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
__snake_case = Accelerator()
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case = infer_auto_device_map(a__ )
__snake_case = '''cpu'''
__snake_case = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a__ , load_in_abit=a__ , llm_inta_enable_fpaa_cpu_offload=a__ )
# This should not work and get value error
with self.assertRaises(a__ ):
__snake_case = accelerator.prepare(a__ )
@slow
@require_bnb
@require_multi_gpu
def a (self : int ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
__snake_case = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case = infer_auto_device_map(a__ )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a__ , device_map=a__ , )
__snake_case = Accelerator()
# This should not work and get value error
with self.assertRaises(a__ ):
__snake_case = accelerator.prepare(a__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def a (self : int ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case = infer_auto_device_map(a__ )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a__ , device_map=a__ , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(a__ )
@require_cuda
def a (self : str ):
"""simple docstring"""
__snake_case = torch.nn.Linear(10 , 10 )
__snake_case = torch.optim.SGD(model.parameters() , lr=0.0_1 )
__snake_case = Accelerator(cpu=a__ )
__snake_case = accelerator.prepare(a__ )
| 24 |
def get_1s_count(number: int) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's method."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
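# Hedged property check: Kernighan's count must agree with a naive count of
# '1' digits in the binary representation.
if __name__ == "__main__":
    for n in range(1024):
        assert get_1s_count(n) == bin(n).count("1")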
| 24 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 24 |
from math import log2


def lowest_set_bit_index(a: int) -> int:
    # `lowest_set_bit_index` is a descriptive stand-in for the mangled
    # original identifier. `a & -a` isolates the lowest set bit; log2 of
    # that power of two is its index.
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
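# Hedged spot-checks (`lowest_set_bit_index` is the stand-in name used
# above): 12 == 0b1100, so its lowest set bit is at index 2; input 0 maps
# to 0 by convention.
if __name__ == "__main__":
    assert lowest_set_bit_index(12) == 2
    assert lowest_set_bit_index(1) == 0
    assert lowest_set_bit_index(0) == 0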
| 24 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
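# Hedged usage sketch of the lazy-import structure above: `_LazyModule`
# defers the torch-backed imports until first attribute access, so the
# following only pulls in the heavy modules when the classes are used
# (the checkpoint name is the public Salesforce release):
#
#   from transformers import Blip2Processor, Blip2ForConditionalGeneration
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")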
| 24 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
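# Hedged usage sketch for the pipeline above (assumes the standard
# transformers pipeline factory and a public DPT checkpoint):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")      # PIL image, scaled to 0-255
#   tensor = result["predicted_depth"]     # raw model output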
| 24 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Optional[int] , a__ : Optional[int] , a__ : List[str]=12 , a__ : Dict=7 , a__ : Union[str, Any]=True , a__ : List[str]=True , a__ : List[str]=True , a__ : Any=99 , a__ : int=32 , a__ : Optional[int]=32 , a__ : Optional[Any]=2 , a__ : List[Any]=4 , a__ : int=37 , a__ : List[Any]=0.1 , a__ : List[str]=0.1 , a__ : List[str]=512 , a__ : Dict=0.0_2 , a__ : int=0 , a__ : Tuple=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = projection_dim
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = scope
__snake_case = bos_token_id
def a (self : int ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__snake_case = input_mask.numpy()
__snake_case , __snake_case = input_mask.shape
__snake_case = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a__ ):
__snake_case = 1
__snake_case = 0
__snake_case = self.get_config()
return config, input_ids, tf.convert_to_tensor(a__ )
def a (self : Any ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def a (self : Any , a__ : Optional[int] , a__ : Tuple , a__ : List[Any] ):
"""simple docstring"""
__snake_case = TFBlipTextModel(config=a__ )
__snake_case = model(a__ , attention_mask=a__ , training=a__ )
__snake_case = model(a__ , training=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (TFBlipTextModel,) if is_tf_available() else ()
A_ : List[Any] = False
A_ : List[str] = False
A_ : Tuple = False
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = BlipTextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : str ):
"""simple docstring"""
pass
def a (self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def a (self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@slow
def a (self : List[Any] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFBlipTextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def a (self : Tuple , a__ : List[str]=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=a__ )
| 24 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
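# Hedged summary sketch of the API exercised above: patch_submodule works
# both as a context manager and via start()/stop(), swapping an attribute
# reachable through a dotted path on the target module and restoring it
# afterwards.
def _patch_submodule_sketch():
    mock = "__sketch_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        assert _test_patching.os.path.join is mock
    assert _test_patching.os.path.join is not mock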
| 24 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
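# Hedged extension sketch: RMSE is often reported next to MAE/MSE. Inside
# main(), using its locals, older scikit-learn releases expose it via the
# squared flag (newer releases replace it with root_mean_squared_error):
#
#   rmse = mean_squared_error(y_test, predictions, squared=False)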
| 24 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments :
    data_dir : str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task : Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length : Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length : Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang : Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang : Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams : Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss : bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics ( split , metrics , output_dir ) -> None:
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
def main ( ) -> Optional[Any]:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
)
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn ( index ) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
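# Typical invocation (a sketch, not part of this file; the flag names come from
# the ModelArguments/DataTrainingArguments dataclasses above plus the shared
# TrainingArguments, and the model id and paths are placeholders):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#       --output_dir ./out --task summarization --do_train --do_eval --predict_with_generate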
| 24 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[str] = CpmAntTokenizer
A_ : Optional[int] = False
def a (self : Optional[int] ):
"""simple docstring"""
super().setUp()
__snake_case = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def a (self : Dict ):
"""simple docstring"""
__snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
__snake_case = '''今天天气真好!'''
__snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = '''今天天气真好!'''
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
__snake_case = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 24 |
from math import pi
def arc_length ( angle : int , radius : int ) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
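# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708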
| 24 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE__ :
def __init__(self : List[str] , a__ : str = "cpu" , a__ : str = "openai/clip-vit-large-patch14" ):
"""simple docstring"""
__snake_case = device
__snake_case = CLIPTokenizerFast.from_pretrained(a__ )
__snake_case = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__snake_case = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__snake_case = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__snake_case = torchvision.transforms.Resize(224 )
__snake_case = torchvision.transforms.CenterCrop(224 )
def a (self : Optional[Any] , a__ : str ):
"""simple docstring"""
__snake_case = self.resize(a__ )
__snake_case = self.center_crop(a__ )
__snake_case = self.normalize(a__ )
return images
def __call__(self : Union[str, Any] , a__ : Tuple=None , a__ : Tuple=None , **a__ : int ):
"""simple docstring"""
__snake_case = self.tokenizer(text=a__ , **a__ )
__snake_case = self.preprocess_img(a__ )
__snake_case = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Optional[int] , a__ : Dict=10 , a__ : Dict=0.0_1 , a__ : Union[str, Any]=None , a__ : Any=None , a__ : Optional[Any]=None , a__ : Dict=None , a__ : Union[str, Any]=None , a__ : List[str]=None , a__ : Union[str, Any]=False , a__ : Dict=True , a__ : Dict="image" , a__ : Optional[Any]=True , a__ : List[str]=False , a__ : Optional[Any]=False , a__ : List[str]=False , ):
"""simple docstring"""
super().__init__()
__snake_case = None
__snake_case = device if device else get_device()
if vqgan:
__snake_case = vqgan
else:
__snake_case = load_vqgan(self.device , conf_path=a__ , ckpt_path=a__ )
self.vqgan.eval()
if clip:
__snake_case = clip
else:
__snake_case = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__snake_case = ProcessorGradientFlow(device=self.device )
__snake_case = iterations
__snake_case = lr
__snake_case = log
__snake_case = make_grid
__snake_case = return_val
__snake_case = quantize
__snake_case = self.vqgan.decoder.z_shape
def a (self : str , a__ : Optional[int]=None , a__ : Tuple=None , a__ : str=5 , a__ : int=True ):
"""simple docstring"""
__snake_case = []
if output_path is None:
__snake_case = '''./animation.gif'''
if input_path is None:
__snake_case = self.save_path
__snake_case = sorted(glob(input_path + '''/*''' ) )
if not len(a__ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(a__ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
__snake_case = total_duration / len(a__ )
__snake_case = [frame_duration] * len(a__ )
if extend_frames:
__snake_case = 1.5
__snake_case = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(a__ ) )
imageio.mimsave(a__ , a__ , duration=a__ )
print(f"""gif saved to {output_path}""" )
def a (self : List[Any] , a__ : List[str]=None , a__ : Optional[int]=None ):
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__snake_case = preprocess(Image.open(a__ ) , target_image_size=256 ).to(self.device )
__snake_case = preprocess_vqgan(a__ )
__snake_case , *__snake_case = self.vqgan.encode(a__ )
return z
def a (self : List[str] , a__ : int ):
"""simple docstring"""
__snake_case = self.latent.detach().requires_grad_()
__snake_case = base_latent + transform_vector
if self.quantize:
__snake_case , *__snake_case = self.vqgan.quantize(a__ )
else:
__snake_case = trans_latent
return self.vqgan.decode(a__ )
def a (self : int , a__ : Dict , a__ : str , a__ : str=None ):
"""simple docstring"""
__snake_case = self.clip_preprocessor(text=a__ , images=a__ , return_tensors='''pt''' , padding=a__ )
__snake_case = self.clip(**a__ )
__snake_case = clip_outputs.logits_per_image
if weights is not None:
__snake_case = similarity_logits * weights
return similarity_logits.sum()
def a (self : Dict , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : int ):
"""simple docstring"""
__snake_case = self._get_clip_similarity(pos_prompts['''prompts'''] , a__ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__snake_case = self._get_clip_similarity(neg_prompts['''prompts'''] , a__ , weights=neg_prompts['''weights'''] )
else:
__snake_case = torch.tensor([1] , device=self.device )
__snake_case = -torch.log(a__ ) + torch.log(a__ )
return loss
def a (self : Any , a__ : Optional[Any] , a__ : List[str] , a__ : Tuple ):
"""simple docstring"""
__snake_case = torch.randn_like(self.latent , requires_grad=a__ , device=self.device )
__snake_case = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__snake_case = self._add_vector(a__ )
__snake_case = loop_post_process(a__ )
__snake_case = self._get_CLIP_loss(a__ , a__ , a__ )
print('''CLIP loss''' , a__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=a__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def a (self : int , a__ : Optional[Any] , a__ : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
wandb.init(reinit=a__ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__snake_case = Image.open(a__ )
__snake_case = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(a__ ) )
def a (self : Optional[int] , a__ : int ):
"""simple docstring"""
if not prompts:
return []
__snake_case = []
__snake_case = []
if isinstance(a__ , a__ ):
__snake_case = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(a__ , (tuple, list) ):
__snake_case = prompt[0]
__snake_case = float(prompt[1] )
elif ":" in prompt:
__snake_case , __snake_case = prompt.split(''':''' )
__snake_case = float(a__ )
else:
__snake_case = prompt
__snake_case = 1.0
processed_prompts.append(a__ )
weights.append(a__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a__ , device=self.device ),
}
def a (self : Optional[Any] , a__ : Union[str, Any] , a__ : Any=None , a__ : List[Any]=None , a__ : Optional[int]=True , a__ : Any=False , a__ : Any=True , a__ : int=True , a__ : List[str]=None , ):
"""simple docstring"""
if image_path:
__snake_case = self._get_latent(a__ )
else:
__snake_case = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(a__ , a__ , a__ )
assert pos_prompts, "You must provide at least one positive prompt."
__snake_case = self.process_prompts(a__ )
__snake_case = self.process_prompts(a__ )
if save_final and save_path is None:
__snake_case = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
__snake_case = save_path + '''_''' + get_timestamp()
os.makedirs(a__ )
__snake_case = save_path
__snake_case = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(a__ ) )
__snake_case = loop_post_process(a__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a__ , a__ , a__ ) ):
if show_intermediate:
show_pil(a__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(a__ )} )
if show_final:
show_pil(a__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
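# The core idea of the generate loop above: the VQGAN latent is held fixed while
# a small additive vector is optimised with Adam so that the decoded image moves
# toward the positive CLIP prompts and away from the negative ones
# (loss = -log(pos_similarity) + log(neg_similarity), as in _get_CLIP_loss).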
| 24 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[Any] = 'vit_msn'
    def __init__(self : Union[str, Any] , hidden_size : Optional[Any]=768 , num_hidden_layers : Optional[Any]=12 , num_attention_heads : Optional[int]=12 , intermediate_size : Optional[int]=3072 , hidden_act : Union[str, Any]="gelu" , hidden_dropout_prob : str=0.0 , attention_probs_dropout_prob : int=0.0 , initializer_range : Optional[Any]=0.0_2 , layer_norm_eps : List[Any]=1E-06 , image_size : Optional[int]=224 , patch_size : str=16 , num_channels : Optional[Any]=3 , qkv_bias : int=True , **kwargs : List[Any] , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
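# Usage sketch (assuming this config class is exported as `ViTMSNConfig`, as in
# its `transformers` counterpart): the defaults above describe the ViT-MSN base
# architecture, e.g. hidden_size=768 across 12 layers of 12 attention heads.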
| 24 | 1 |
def lowerCamelCase__ ( numerator : int = 1 , digit : int = 1000 ) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
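# Example: with numerator=1 and digit=10 the function returns 7, since 1/7 has
# the longest recurring decimal cycle (142857, length 6) among 1/1 .. 1/10.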
| 24 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def a (self : int , a__ : List[Any]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
__snake_case = np.random.RandomState(a__ )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Any ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def a (self : List[str] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def a (self : Dict ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
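# Pattern worth noting in the tests above: a different scheduler can be swapped
# into an existing pipeline with `SchedulerClass.from_config(pipe.scheduler.config)`,
# because the schedulers can all be built from the pipeline's scheduler config.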
| 24 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
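# Note: `_LazyModule` defers the imports declared in `_import_structure`, so
# importing e.g. `ViTMSNConfig` from this package does not pull in torch until
# one of the torch-backed symbols listed above is actually accessed.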
| 24 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : str
A_ : str
A_ : Optional[str] = None
A_ : Optional[str] = None
A_ : Optional[str] = None
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : List[int]
A_ : Optional[List[int]] = None
A_ : Optional[List[int]] = None
A_ : Optional[Union[int, float]] = None
A_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[InputFeatures]
def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = os.path.join(
a__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , )
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + '''.lock'''
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case = torch.load(a__ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case = (
processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
)
logger.info('''Training examples: %s''' , len(a__ ) )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(self.features , a__ )
def __len__(self : int ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Dict , a__ : List[Any] ):
"""simple docstring"""
return self.features[i]
def a (self : List[Any] ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
A_ : List[InputFeatures]
def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
__snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case = tf.data.Dataset.from_generator(
a__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a (self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Any , a__ : Dict ):
"""simple docstring"""
return self.features[i]
def a (self : str ):
"""simple docstring"""
return self.label_list
class HansProcessor ( DataProcessor ):
    def get_train_examples (self : Dict , data_dir : Dict ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_train_set.txt''' ) ) , '''train''' )
    def get_dev_examples (self : Optional[int] , data_dir : Tuple ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
    def get_labels (self : int ):
        """simple docstring"""
        return ["contradiction", "entailment", "neutral"]
    def _create_examples (self : Any , lines : Optional[int] , set_type : List[Any] ):
        """simple docstring"""
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '''%s-%s''' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features ( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 1_0000 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
hans_tasks_num_labels = {
    'hans': 3,
}
hans_processors = {
    'hans': HansProcessor,
}
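# Usage sketch (names as defined above; the data path and the tokenizer are
# placeholders):
#   processor = hans_processors['hans']()
#   examples = processor.get_dev_examples('/path/to/hans')
#   features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)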
| 24 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def a (*a__ : List[str] , **a__ : List[str] ):
"""simple docstring"""
pass
    def load_image ( _ ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
# We can optionnally pass directly the words and bounding boxes
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
| 24 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[str] = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'CLIPImageProcessor'
A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self : int , image_processor : int=None , tokenizer : Dict=None , **kwargs : List[str] ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__(self : Any , text : Dict=None , images : List[str]=None , return_tensors : Dict=None , **kwargs : Tuple ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode (self : Union[str, Any] , *args : int , **kwargs : List[Any] ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode (self : Any , *args : List[Any] , **kwargs : List[str] ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names (self : int ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
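# Usage sketch (the tokenizer/image-processor instances are placeholders; in
# `transformers`, this pairing of a CLIP image processor with an XLM-R tokenizer
# corresponds to the AltCLIP processor):
#   processor = SCREAMING_SNAKE_CASE__(image_processor=image_processor, tokenizer=tokenizer)
#   batch = processor(text=['a photo of a cat'], images=image, return_tensors='pt')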
| 24 | 1 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
    def __init__(self : Any , img : str , dst_width : int , dst_height : int ):
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process (self : List[str] ):
        """simple docstring"""
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x (self : Union[str, Any] , x : int ):
        """simple docstring"""
        return int(self.ratio_x * x )
    def get_y (self : List[Any] , y : int ):
        """simple docstring"""
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w , dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
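# Mapping intuition: each destination pixel (i, j) samples the source at
# (int(ratio_y * i), int(ratio_x * j)). E.g. shrinking an 800-pixel-wide image
# to 400 gives ratio_x = 2, so destination column 10 reads source column 20.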
| 24 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Dict ):
"""simple docstring"""
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
        logging.set_verbosity(level_origin )
def a (self : Dict ):
"""simple docstring"""
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        msg = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def a (self : Dict ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
        # restore to the original level
        os.environ['''TRANSFORMERS_VERBOSITY'''] = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def a (self : List[Any] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def a (self : Any ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        msg = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
self.assertEqual(cl.out , msg + '''\n''' )
def lowerCamelCase__ ( ) -> str:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
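# Quick reference for the verbosity API exercised above (all part of
# transformers.utils.logging):
#   logging.set_verbosity_error()    # only errors
#   logging.set_verbosity_warning()  # the library default
#   logging.set_verbosity_info()     # info and above
#   logging.set_verbosity_debug()    # maximum detail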
| 24 | 1 |
from __future__ import annotations
def generate_all_permutations ( sequence : list[int | str] ) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree ( sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
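# The calls above perform a depth-first walk of the permutation tree, printing
# every ordering exactly once: n! lines in total, e.g. 24 for [3, 1, 2, 4].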
| 24 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[str] = CpmAntTokenizer
A_ : Optional[int] = False
def a (self : Optional[int] ):
"""simple docstring"""
super().setUp()
__snake_case = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def a (self : Dict ):
"""simple docstring"""
__snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
__snake_case = '''今天天气真好!'''
__snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = '''今天天气真好!'''
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
__snake_case = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 24 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 256,
}
snake_case_ = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
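
# Self-contained mimic of the special-token layout above (assumption: ids 101
# and 102 stand in for the real [CLS]/[SEP] ids; not the RemBERT vocabulary):
def _mimic_build_inputs(token_ids_0, token_ids_1=None, cls_id=101, sep_id=102):
    cls, sep = [cls_id], [sep_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    return cls + token_ids_0 + sep + token_ids_1 + sep

assert _mimic_build_inputs([7, 8]) == [101, 7, 8, 102]
assert _mimic_build_inputs([7, 8], [9]) == [101, 7, 8, 102, 9, 102]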
| 24 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
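
    def test_out_indices_default_illustration(self):
        # Added illustration (not in the original suite, hypothetical stage
        # names): (-1,) and [len(stage_names) - 1] are two spellings of
        # "last stage only", so both conventions select the same stage.
        stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
        self.assertEqual(stage_names[-1], stage_names[len(stage_names) - 1])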
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
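
    # Usage sketch for the backbone under test (assumes timm is installed;
    # no download is needed since use_pretrained_backbone=False, and the
    # method is defined but never called, so it does not run at collection time):
    def _timm_backbone_usage_sketch(self):
        config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
        model = TimmBackbone(config).eval()
        with torch.no_grad():
            feature_maps = model(torch.randn(1, 3, 224, 224)).feature_maps
        return len(feature_maps)  # one feature map per requested stage -> 4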
| 24 | 1 |