| column | type | values |
|---|---|---|
| code | string | lengths 87–55.2k |
| code_codestyle | int64 | 0–349 |
| style_context | string | lengths 135–49.1k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
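A quick usage sketch for the config above (not part of the original file; it assumes the class is exposed as `TrajectoryTransformerConfig` on the public `transformers` package):

# Hedged usage sketch, assuming the import path below is available.
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)
# attribute_map aliases the canonical names onto the GPT-style ones:
assert config.hidden_size == config.n_embd == 128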
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file: str) -> List[str]:
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
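A brief usage sketch (not from the original file; the checkpoint is one of the ESM-2 checkpoints listed in the vocab map above):

# Hedged usage sketch, tokenizing a protein sequence with a pretrained ESM-2 vocab.
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
enc = tokenizer("MKTAYIAKQR")
# The no-split-token trie splits the string into single residues, and
# build_inputs_with_special_tokens wraps them in <cls> ... <eos>.
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))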
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)

class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
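A short sketch of what the shim above does (hypothetical demo using only standard `warnings` machinery):

# Hedged demo: constructing the deprecated class emits a FutureWarning.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = LayoutLMv2FeatureExtractor()  # otherwise behaves like LayoutLMv2ImageProcessor
assert any(issubclass(w.category, FutureWarning) for w in caught)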
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
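The image-classification integration test above can be reproduced outside the test harness; a minimal sketch, assuming the same public checkpoint and fixture image:

# Hedged sketch, mirroring test_inference_image_classification_head without unittest.
import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(logits.argmax(-1).item())  # predicted class id (1001 logits incl. background)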
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
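Invocation sketch (not in the original script; the script name and paths below are placeholders):

# Hypothetical CLI usage, paths are placeholders:
#   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100-converted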
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
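A usage sketch for the ONNX config above (not from the original file; it assumes the standard `transformers.onnx` dummy-input flow and a public CodeGen checkpoint from the archive map):

# Hedged sketch, building dummy inputs the way the ONNX exporter would.
from transformers import AutoConfig, AutoTokenizer, TensorType

config = AutoConfig.from_pretrained("Salesforce/codegen-350M-mono")
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

onnx_config = CodeGenOnnxConfig(config, task="default")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(list(dummy.keys()))  # ["input_ids", "attention_mask"] when use_past is False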
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
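Invocation sketch (not in the original script; the script name and dump folder are placeholders, the URL is the default defined above):

# Hypothetical CLI usage:
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base --push_to_hub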
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
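A state-handling sketch (not from the original file; it only exercises the pure-state API above and assumes the class names recovered from the internal references):

# Hedged sketch: create scheduler state and derive a 50-step sigma schedule.
scheduler = FlaxKarrasVeScheduler()  # sigma_min/sigma_max etc. from register_to_config defaults
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)
print(state.timesteps[0], state.schedule.shape)  # sigma(t_i) values, one per timestep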
import os
import sys
from contextlib import contextmanager

# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
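Usage sketch for the context manager (hedged; `hide` is the name recovered for the wrapper above, and the loop is illustrative):

# Hedged demo: hide the cursor during a long-running loop, restore it afterwards.
import time

with hide():
    for i in range(3):
        print(f"working... {i}", end="\r", flush=True)
        time.sleep(0.1)
print("done        ")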
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")
    @classmethod
    def ReadModel(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self.Expand_Mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
def __magic_name__ ( self : Optional[int], __A : str, __A : int, __A : Union[str, Any]="average_pool" ):
# pooling process
UpperCAmelCase : Union[str, Any] = len(featuremaps[0] )
UpperCAmelCase : Tuple = int(size_map / size_pooling )
UpperCAmelCase : Optional[int] = []
for i_map in range(len(__A ) ):
UpperCAmelCase : List[str] = featuremaps[i_map]
UpperCAmelCase : List[str] = []
for i_focus in range(0, __A, __A ):
for j_focus in range(0, __A, __A ):
UpperCAmelCase : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__A ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__A ) )
UpperCAmelCase : Dict = np.asmatrix(__A ).reshape(__A, __A )
featuremap_pooled.append(__A )
return featuremap_pooled
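    # NOTE (illustrative): with "average_pool", a 4x4 feature map and size_pooling=2
    # shrink to a 2x2 map whose entries are the means of the four 2x2 windows;
    # "max_pooling" keeps the maximum of each window instead.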
def __magic_name__ ( self : Any, __A : int ):
        # expand three-dimensional data into a one-dimensional list
UpperCAmelCase : Dict = []
for i in range(len(__A ) ):
UpperCAmelCase : List[str] = np.shape(data[i] )
UpperCAmelCase : Any = data[i].reshape(1, shapes[0] * shapes[1] )
UpperCAmelCase : Any = data_listed.getA().tolist()[0]
data_expanded.extend(__A )
UpperCAmelCase : List[Any] = np.asarray(__A )
return data_expanded
def __magic_name__ ( self : int, __A : Union[str, Any] ):
        # expand a matrix into a one-dimensional list
UpperCAmelCase : int = np.asarray(__A )
UpperCAmelCase : int = np.shape(__A )
UpperCAmelCase : Union[str, Any] = data_mat.reshape(1, shapes[0] * shapes[1] )
return data_expanded
def __magic_name__ ( self : Any, __A : List[str], __A : List[Any], __A : Dict, __A : Tuple, __A : Optional[Any] ):
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Any = 0
for i_map in range(__A ):
UpperCAmelCase : Dict = np.ones((size_map, size_map) )
for i in range(0, __A, __A ):
for j in range(0, __A, __A ):
UpperCAmelCase : Any = pd_pool[
i_pool
]
UpperCAmelCase : Optional[Any] = i_pool + 1
UpperCAmelCase : List[str] = np.multiply(
__A, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
pd_all.append(__A )
return pd_all
def __magic_name__ ( self : Tuple, __A : List[Any], __A : str, __A : List[Any], __A : Tuple, __A : Any, __A : Optional[Any]=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(__A )) )
print((''' - - Shape: Teach_Data ''', np.shape(__A )) )
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : Dict = []
UpperCAmelCase : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase : str = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(__A ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase : str = np.asmatrix(datas_train[p] )
UpperCAmelCase : Any = np.asarray(datas_teach[p] )
UpperCAmelCase , UpperCAmelCase : Dict = self.convolute(
__A, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
UpperCAmelCase : List[str] = self.pooling(__A, self.size_poolinga )
UpperCAmelCase : List[Any] = np.shape(__A )
UpperCAmelCase : Tuple = self._expand(__A )
UpperCAmelCase : List[Any] = data_bp_input
UpperCAmelCase : Any = np.dot(__A, self.vji.T ) - self.thre_bpa
UpperCAmelCase : Optional[int] = self.sig(__A )
UpperCAmelCase : Dict = np.dot(__A, self.wkj.T ) - self.thre_bpa
UpperCAmelCase : Any = self.sig(__A )
                # --------------Model Learning------------------------
# calculate error and gradient---------------
UpperCAmelCase : Optional[Any] = np.multiply(
(data_teach - bp_outa), np.multiply(__A, (1 - bp_outa) ) )
UpperCAmelCase : Optional[Any] = np.multiply(
np.dot(__A, self.wkj ), np.multiply(__A, (1 - bp_outa) ) )
UpperCAmelCase : List[str] = np.dot(__A, self.vji )
UpperCAmelCase : List[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase : int = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase : Tuple = self._calculate_gradient_from_pool(
__A, __A, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase : Optional[int] = self.rate_weight * np.dot(__A, __A )
UpperCAmelCase : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase : int = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed error over every single image
UpperCAmelCase : Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase : Optional[Any] = rp + 1
UpperCAmelCase : List[Any] = error_count / patterns
all_mse.append(__A )
def draw_error():
UpperCAmelCase : Optional[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__A, '''+-''' )
plt.plot(__A, '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(__A, alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def __magic_name__ ( self : List[str], __A : Tuple ):
# model predict
UpperCAmelCase : Optional[Any] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(__A )) )
for p in range(len(__A ) ):
UpperCAmelCase : Tuple = np.asmatrix(datas_test[p] )
UpperCAmelCase , UpperCAmelCase : Tuple = self.convolute(
__A, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
UpperCAmelCase : Dict = self.pooling(__A, self.size_poolinga )
UpperCAmelCase : Any = self._expand(__A )
UpperCAmelCase : int = data_bp_input
UpperCAmelCase : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase : Any = self.sig(__A )
UpperCAmelCase : int = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase : Union[str, Any] = self.sig(__A )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase : Optional[int] = [list(map(self.do_round, __A ) ) for each in produce_out]
return np.asarray(__A )
def __magic_name__ ( self : Optional[int], __A : Union[str, Any] ):
        # return the image data after the convolution process so it can be inspected
UpperCAmelCase : Any = np.asmatrix(__A )
UpperCAmelCase , UpperCAmelCase : Dict = self.convolute(
__A, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
UpperCAmelCase : List[str] = self.pooling(__A, self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
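    # Usage sketch (illustrative; `train` and `predict` stand in for the training
    # and prediction methods above, and every hyperparameter below is an
    # assumption, not a value from the original source):
    #
    #   cnn = CNN([7, 4, 1], 2, 484, 32, 10, 0.2, 0.2)
    #   #         [kernel, n_kernels, step], pool size, three BP widths, two rates
    #   #         (484 = 4 kernels * 11x11 pooled maps, sized for 28x28 inputs)
    #   cnn.train(len(train_imgs), train_imgs, teach_vectors, 100, 0.5, True)
    #   preds = cnn.predict(test_imgs)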
| 336 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
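# NOTE: the `_LazyModule` above keeps importing this package cheap: the names in
# `_import_structure` (e.g. `EncodecModel`) are only actually imported the first
# time they are looked up on the module, mirroring the TYPE_CHECKING branch.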
| 336 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
_lowerCamelCase : Dict = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def a__ ( UpperCAmelCase : int ) -> List[str]:
UpperCAmelCase : Any = {}
with open(UpperCAmelCase , '''r''' ) as file:
for line_number, line in enumerate(UpperCAmelCase ):
UpperCAmelCase : Any = line.strip()
if line:
UpperCAmelCase : int = line.split()
UpperCAmelCase : Union[str, Any] = line_number
UpperCAmelCase : Optional[Any] = words[0]
UpperCAmelCase : List[Any] = value
return result
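# Illustrative input for the reader above (an assumption about the label file,
# one class name per line): a file containing "dog\ncat\n" yields
# {0: "dog", 1: "cat"}, later used as the id2label mapping of the
# sequence-classification head.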
def a__ ( UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Dict ) -> str:
for attribute in key.split('''.''' ):
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , UpperCAmelCase )
UpperCAmelCase : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase ):
UpperCAmelCase : Optional[Any] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : int = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : Dict = getattr(UpperCAmelCase , UpperCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : List[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : Optional[int] = getattr(UpperCAmelCase , UpperCAmelCase )
UpperCAmelCase : Optional[Any] = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase : List[Any] = value[0]
else:
UpperCAmelCase : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase : List[str] = value
elif weight_type == "bias":
UpperCAmelCase : Optional[int] = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , UpperCAmelCase )
UpperCAmelCase : Optional[Any] = value
else:
UpperCAmelCase : str = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] ) -> Dict:
UpperCAmelCase : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase ):
UpperCAmelCase : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase : List[Any] = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase : List[str] = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase : Tuple = '''.'''.join([key, hf_param_name] )
else:
UpperCAmelCase : Optional[Any] = key
UpperCAmelCase : Optional[int] = value if '''lm_head''' in full_key else value[0]
_lowerCamelCase : Any = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : int=None , UpperCAmelCase : int=None ) -> Tuple:
UpperCAmelCase : Union[str, Any] = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase : Dict = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase : Tuple = True
if "*" in mapped_key:
UpperCAmelCase : Any = name.split(UpperCAmelCase )[0].split('''.''' )[-2]
UpperCAmelCase : Union[str, Any] = mapped_key.replace('''*''' , UpperCAmelCase )
if "weight_g" in name:
UpperCAmelCase : Optional[Any] = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase : str = '''weight_v'''
elif "bias" in name:
UpperCAmelCase : Dict = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : str = '''weight'''
else:
UpperCAmelCase : int = None
if hf_dict is not None:
rename_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return is_used
return is_used
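# NOTE (illustrative): tracing one key through MAPPING above, a fairseq weight
# named "encoder.layers.3.fc1.weight" matches the "fc1" entry, the layer index
# "3" is substituted for "*", and the value lands on
# "wav2vec2.encoder.layers.3.feed_forward.intermediate_dense.weight" with
# weight_type "weight".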
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : str = []
UpperCAmelCase : List[str] = fairseq_model.state_dict()
UpperCAmelCase : Dict = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase : Union[str, Any] = True
else:
UpperCAmelCase : List[str] = load_wavaveca_layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ) -> int:
UpperCAmelCase : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase : int = name.split('''.''' )
UpperCAmelCase : Any = int(items[0] )
UpperCAmelCase : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCAmelCase : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCAmelCase : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=False ) -> List[str]:
if config_path is not None:
UpperCAmelCase : Any = WavaVecaConfig.from_pretrained(UpperCAmelCase )
else:
UpperCAmelCase : Optional[int] = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase : List[Any] = read_txt_into_dict(UpperCAmelCase )
UpperCAmelCase : Union[str, Any] = idalabel
UpperCAmelCase : Optional[int] = WavaVecaForSequenceClassification(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
feature_extractor.save_pretrained(UpperCAmelCase )
elif is_finetuned:
if dict_path:
UpperCAmelCase : List[str] = Dictionary.load(UpperCAmelCase )
            # important: change the bos & pad token ids, since the CTC blank
            # symbol is <pad> and not <s> as in fairseq
UpperCAmelCase : Tuple = target_dict.pad_index
UpperCAmelCase : List[Any] = target_dict.bos_index
UpperCAmelCase : Optional[Any] = target_dict.eos_index
UpperCAmelCase : List[Any] = len(target_dict.symbols )
UpperCAmelCase : int = os.path.join(UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
UpperCAmelCase : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[Any] = 1
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
UpperCAmelCase : List[Any] = WavaVecaCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=UpperCAmelCase , )
UpperCAmelCase : Tuple = True if config.feat_extract_norm == '''layer''' else False
UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
UpperCAmelCase : int = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
UpperCAmelCase : Union[str, Any] = WavaVecaForCTC(UpperCAmelCase )
else:
UpperCAmelCase : List[Any] = WavaVecaForPreTraining(UpperCAmelCase )
if is_finetuned or is_seq_class:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
UpperCAmelCase : Optional[int] = argparse.Namespace(task='''audio_pretraining''' )
UpperCAmelCase : List[str] = fairseq.tasks.setup_task(UpperCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase )
UpperCAmelCase : Dict = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
_lowerCamelCase : Tuple = parser.parse_args()
_lowerCamelCase : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 336 |
from __future__ import annotations
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int ) -> list[str]:
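    """
    Split `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges.

    Illustrative doctest (exercised by the `doctest.testmod()` call below):

    >>> a__(100, 4)
    ['1-25', '26-50', '51-75', '76-100']
    """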
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
UpperCAmelCase : str = number_of_bytes // partitions
UpperCAmelCase : Dict = []
for i in range(UpperCAmelCase ):
UpperCAmelCase : int = i * bytes_per_partition + 1
UpperCAmelCase : Optional[int] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def a__ ( UpperCAmelCase : int ) -> Optional[int]: # picklable for multiprocessing
return x.sum()
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[Any]: # picklable for multiprocessing
return i + 1
@dataclass
class __UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : str ):
UpperCAmelCase : Dict = {}
UpperCAmelCase : Tuple = []
UpperCAmelCase : Any = 1
UpperCAmelCase : Any = [1, 2]
UpperCAmelCase : Tuple = {'''a''': 1, '''b''': 2}
UpperCAmelCase : Union[str, Any] = {'''a''': [1, 2], '''b''': [3, 4]}
UpperCAmelCase : Dict = {'''a''': {'''1''': 1}, '''b''': 2}
UpperCAmelCase : Optional[Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
UpperCAmelCase : int = {}
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Tuple = 2
UpperCAmelCase : List[str] = [2, 3]
UpperCAmelCase : Union[str, Any] = {'''a''': 2, '''b''': 3}
UpperCAmelCase : List[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
UpperCAmelCase : Optional[int] = {'''a''': {'''1''': 2}, '''b''': 3}
UpperCAmelCase : int = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(__A, __A ), __A )
self.assertEqual(map_nested(__A, __A ), __A )
self.assertEqual(map_nested(__A, __A ), __A )
self.assertEqual(map_nested(__A, __A ), __A )
self.assertEqual(map_nested(__A, __A ), __A )
self.assertEqual(map_nested(__A, __A ), __A )
self.assertEqual(map_nested(__A, __A ), __A )
self.assertEqual(map_nested(__A, __A ), __A )
UpperCAmelCase : Optional[Any] = 2
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
self.assertEqual(map_nested(__A, __A, num_proc=__A ), __A )
UpperCAmelCase : Union[str, Any] = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
UpperCAmelCase : Optional[int] = {'''a''': 2, '''b''': 0, '''c''': 2}
UpperCAmelCase : str = {
'''a''': np.eye(2 ).astype(__A ),
'''b''': np.zeros(3 ).astype(__A ),
'''c''': np.ones(2 ).astype(__A ),
}
self.assertEqual(map_nested(__A, __A, map_numpy=__A ), __A )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__A, __A, map_numpy=__A ).items()}, {k: v.tolist() for k, v in expected_map_nested_sna_int.items()}, )
self.assertEqual(map_nested(__A, __A, map_numpy=__A, num_proc=__A ), __A )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__A, __A, map_numpy=__A, num_proc=__A ).items()}, {k: v.tolist() for k, v in expected_map_nested_sna_int.items()}, )
with self.assertRaises(__A ): # can't pickle a local lambda
map_nested(lambda __A : x + 1, __A, num_proc=__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : int = {'''a''': 1, '''b''': 2}
UpperCAmelCase : Optional[int] = {'''a''': 3, '''b''': 4}
UpperCAmelCase : List[str] = {'''a''': 5, '''b''': 6}
UpperCAmelCase : Any = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__A, __A, __A ) ), __A )
def __magic_name__ ( self : List[Any] ):
class __UpperCAmelCase :
UpperCamelCase = """bar"""
UpperCAmelCase : Union[str, Any] = Foo()
self.assertEqual(foo.my_attr, '''bar''' )
with temporary_assignment(__A, '''my_attr''', '''BAR''' ):
self.assertEqual(foo.my_attr, '''BAR''' )
self.assertEqual(foo.my_attr, '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int ) -> Dict:
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
UpperCAmelCase : Union[str, Any] = {f'''{i}''': i for i in range(UpperCAmelCase )}
UpperCAmelCase : Optional[Any] = map_nested(lambda UpperCAmelCase : x + 10 , UpperCAmelCase , num_proc=UpperCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
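# NOTE: the parametrization above pins down the dispatch rule being mocked: below
# `parallel_min_length` (16 items) or with `num_proc` <= 1 the single-process path
# runs, otherwise a pool is spawned with at most one process per item.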
class __UpperCAmelCase ( lowerCamelCase__ ):
@require_tf
def __magic_name__ ( self : List[Any] ):
import tensorflow as tf
from tensorflow.keras import layers
UpperCAmelCase : List[Any] = layers.Dense(2 )
def gen_random_output():
UpperCAmelCase : str = tf.random.uniform((1, 3) )
return model(__A ).numpy()
with temp_seed(4_2, set_tensorflow=__A ):
UpperCAmelCase : Optional[Any] = gen_random_output()
with temp_seed(4_2, set_tensorflow=__A ):
UpperCAmelCase : Tuple = gen_random_output()
UpperCAmelCase : Optional[Any] = gen_random_output()
np.testing.assert_equal(__A, __A )
self.assertGreater(np.abs(outa - outa ).sum(), 0 )
@require_torch
def __magic_name__ ( self : Optional[int] ):
import torch
def gen_random_output():
UpperCAmelCase : Any = torch.nn.Linear(3, 2 )
UpperCAmelCase : Dict = torch.rand(1, 3 )
return model(__A ).detach().numpy()
with temp_seed(4_2, set_pytorch=__A ):
UpperCAmelCase : int = gen_random_output()
with temp_seed(4_2, set_pytorch=__A ):
UpperCAmelCase : Optional[int] = gen_random_output()
UpperCAmelCase : int = gen_random_output()
np.testing.assert_equal(__A, __A )
self.assertGreater(np.abs(outa - outa ).sum(), 0 )
def __magic_name__ ( self : Tuple ):
def gen_random_output():
return np.random.rand(1, 3 )
with temp_seed(4_2 ):
UpperCAmelCase : Any = gen_random_output()
with temp_seed(4_2 ):
UpperCAmelCase : int = gen_random_output()
UpperCAmelCase : Union[str, Any] = gen_random_output()
np.testing.assert_equal(__A, __A )
self.assertGreater(np.abs(outa - outa ).sum(), 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Tuple = NestedDataStructure(UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : str ) -> Union[str, Any]:
UpperCAmelCase : List[str] = NestedDataStructure(UpperCAmelCase ).flatten()
assert output == expected_output
def a__ ( ) -> Any:
UpperCAmelCase : List[Any] = A(x=1 , y='''foobar''' )
UpperCAmelCase : Tuple = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(UpperCAmelCase ) == expected_output
UpperCAmelCase : Tuple = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
UpperCAmelCase : List[Any] = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(UpperCAmelCase ) == expected_output
with pytest.raises(UpperCAmelCase ):
asdict([1, A(x=10 , y='''foo''' )] )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return text.split()
def a__ ( UpperCAmelCase : Any ) -> int:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def a__ ( ) -> Any:
with Pool(2 ) as pool:
UpperCAmelCase : Optional[Any] = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(UpperCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCAmelCase : int = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(UpperCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCAmelCase : Tuple = []
for yield_time, content in iflatmap_unordered(
UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(UpperCAmelCase )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(UpperCAmelCase ) == 4
| 336 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
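# Usage sketch (illustrative; the TPU name, zone and command are assumptions):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --install_accelerate --debug
#
# With --debug set, the assembled `gcloud ... ssh` command is printed, not run.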
| 336 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __UpperCAmelCase ( lowerCamelCase__ ):
@staticmethod
@abstractmethod
def __magic_name__ ( __A : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def __magic_name__ ( self : int ):
raise NotImplementedError()
| 336 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
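# NOTE (illustrative): `flatten_yaml_as_dict` above collapses nested YAML such as
#   model:
#     classification:
#       name: mobilevit_v2
# into the dotted attribute "model.classification.name" on the returned namespace,
# which is exactly what the getattr(..., "model.classification.name", ...) lookups
# below depend on.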
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
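# NOTE (illustrative): tracing one key through the rules above, the original
# weight "layer_3.1.local_rep.0.conv.weight" is first rewritten to
# "layer_3.1.local_rep.0.convolution.weight" and then mapped to
# "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight" (without the
# "mobilevitv2." prefix when base_model is True).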
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
    # remove and rename some keys of the loaded original model
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 336 | 1 |
import os
import string
import sys
_lowerCamelCase : List[str] = 1 << 8
_lowerCamelCase : Dict = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 2_7,
"up": 6_5 + ARROW_KEY_FLAG,
"down": 6_6 + ARROW_KEY_FLAG,
"right": 6_7 + ARROW_KEY_FLAG,
"left": 6_8 + ARROW_KEY_FLAG,
"mod_int": 9_1,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 5_0,
"delete": 5_1,
"pg_up": 5_3,
"pg_down": 5_4,
}
# the two endpoints of the arrow-key code range; these KEYMAP entries are read
# back below when decoding escape sequences into arrow keys
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
_lowerCamelCase : str = []
_lowerCamelCase : Any = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(1_0):
_lowerCamelCase : List[str] = ord(str(i))
def a__ ( ) -> Tuple:
if os.name == "nt":
import msvcrt
UpperCAmelCase : List[str] = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(UpperCAmelCase ) == 0:
# Read the keystroke
UpperCAmelCase : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
UpperCAmelCase : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
UpperCAmelCase : Optional[int] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(UpperCAmelCase )
if ord(UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
UpperCAmelCase : Dict = chr(KEYMAP['''esc'''] )
except KeyError:
UpperCAmelCase : Optional[int] = cha[1]
else:
UpperCAmelCase : Any = ch.decode(UpperCAmelCase )
else:
UpperCAmelCase : int = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
UpperCAmelCase : List[str] = sys.stdin.fileno()
UpperCAmelCase : Optional[Any] = termios.tcgetattr(UpperCAmelCase )
try:
tty.setraw(UpperCAmelCase )
UpperCAmelCase : Union[str, Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(UpperCAmelCase , termios.TCSADRAIN , UpperCAmelCase )
return ch
def a__ ( ) -> Dict:
UpperCAmelCase : Any = get_raw_chars()
if ord(UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(UpperCAmelCase ) == KEYMAP["esc"]:
UpperCAmelCase : Union[str, Any] = get_raw_chars()
if ord(UpperCAmelCase ) == KEYMAP["mod_int"]:
UpperCAmelCase : Tuple = get_raw_chars()
if ord(UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
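# Usage sketch (illustrative; `get_character` stands in for the cooked reader
# defined directly above, which folds escape sequences into single key codes):
#
#   while True:
#       key = get_character()
#       if ord(key) == KEYMAP["up"]:
#           ...  # e.g. move a menu cursor up
#       elif ord(key) == KEYMAP["interrupt"]:
#           break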
| 336 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
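# NOTE (illustrative): the converter above recurses through containers, so when
# torch is available, {"logits": torch.tensor([[1.0, 2.0]])} comes back as
# {"logits": [[1.0, 2.0]]}: plain Python lists and scalars all the way down.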
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ):
for k, v in d.items():
UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k
if v and isinstance(UpperCAmelCase , UpperCAmelCase ):
yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
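# Illustrative sketch (not part of the original script): load_orig_config_file
# flattens the nested YAML into dotted keys, so values can later be read back
# with getattr(config, "model.classification.name"). A standalone version of
# the flattening step, using a made-up sample config:
def _flatten_demo(d: dict, parent_key: str = "", sep: str = ".") -> dict:
    items: list = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, dict):
            items.extend(_flatten_demo(v, new_key, sep).items())
        else:
            items.append((new_key, v))
    return dict(items)


# _flatten_demo({"model": {"classification": {"name": "mobilevit_v2"}}})
# -> {"model.classification.name": "mobilevit_v2"}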
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
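# Minimal sketch (assumption, mirroring what rename_key above does one pair at
# a time): applying the collected (old_key, new_key) pairs to a raw state dict.
def _apply_renames_demo(state_dict: dict, rename_pairs: list) -> dict:
    for src, dest in rename_pairs:
        state_dict[dest] = state_dict.pop(src)
    return state_dict


# _apply_renames_demo({"conv_1.weight": 0}, [("conv_1.weight", "mobilevitv2.conv_stem.weight")])
# -> {"mobilevitv2.conv_stem.weight": 0}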
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
    # remove and rename some keys in the loaded original state dict
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
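# Example invocation (the script name and all paths below are hypothetical):
# python convert_mobilevitv2_original_to_pytorch.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#     --orig_config_path ./mobilevitv2.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256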
| 336 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
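# Sketch (assumption, not part of the test suite): the greedy longest-match
# WordPiece scheme the toy vocabulary above exercises, e.g.
# "unwanted" -> ["un", "##want", "##ed"].
def _wordpiece_demo(word: str, vocab: set) -> list:
    tokens: list = []
    start = 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return ["[UNK]"]
        tokens.append(cur)
        start = end
    return tokens


# _wordpiece_demo("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]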
| 336 | 1 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Optional[int], *__A : Dict, **__A : Optional[int] ):
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''', FutureWarning, )
super().__init__(*__A, **__A )
| 336 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
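# Sketch (assumption): how ViTMAE turns the per-patch noise vector used
# throughout these tests into a reproducible random mask -- patches are ranked
# by argsort(noise) and the first (1 - mask_ratio) fraction is kept.
def _random_masking_demo(noise, mask_ratio: float = 0.75):
    num_patches = noise.shape[-1]
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise, axis=-1)  # ascending: smallest noise is kept
    mask = np.ones_like(noise)
    np.put_along_axis(mask, ids_shuffle[..., :len_keep], 0, axis=-1)
    return mask  # 0 = kept patch, 1 = masked patch


# _random_masking_demo(np.random.uniform(size=(1, 16))).sum() == 12.0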
| 336 | 1 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # Transpose row-wise records into one list per criterion (column).
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    # Min-max normalise each column; weight 0 = cost criterion, weight 1 = benefit criterion.
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    # Sum the per-criterion scores for each row.
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def calculate_scores(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
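# Usage sketch: three options scored on (price, comfort), where price is a cost
# criterion (weight 0, lower is better) and comfort a benefit criterion
# (weight 1, higher is better). Each row gains its combined score in place.
# calculate_scores([[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]], [0, 1])
# -> [[20.0, 60.0, 1.25], [23.0, 90.0, 1.0], [22.0, 50.0, 0.333...]]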
| 336 |
def partition(m: int) -> int:
    # Count the partitions of m bottom-up with the recurrence
    # memo[n][k] = memo[n][k - 1] + memo[n - k - 1][k].
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 336 | 1 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __UpperCAmelCase :
UpperCamelCase = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
UpperCamelCase = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
UpperCamelCase = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def a__ ( ) -> str:
UpperCAmelCase : int = HfArgumentParser((ModelArguments,) )
((UpperCAmelCase) , ) : Union[str, Any] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Dict = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=UpperCAmelCase , decoder_config=UpperCAmelCase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
UpperCAmelCase : int = decoder_config.decoder_start_token_id
UpperCAmelCase : int = decoder_config.pad_token_id
if decoder_start_token_id is None:
UpperCAmelCase : int = decoder_config.bos_token_id
if pad_token_id is None:
UpperCAmelCase : Optional[int] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
UpperCAmelCase : Tuple = decoder_config.eos_token_id
UpperCAmelCase : List[str] = decoder_start_token_id
UpperCAmelCase : Tuple = pad_token_id
UpperCAmelCase : int = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
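# Usage sketch (assumption): reloading the exported checkpoint for generation;
# "<output_dir>" stands in for the directory passed as --output_dir above.
# model = FlaxVisionEncoderDecoderModel.from_pretrained("<output_dir>")
# image_processor = AutoImageProcessor.from_pretrained("<output_dir>")
# tokenizer = AutoTokenizer.from_pretrained("<output_dir>")
# pixel_values = image_processor(images=image, return_tensors="np").pixel_values
# generated = model.generate(pixel_values)
# print(tokenizer.batch_decode(generated.sequences, skip_special_tokens=True))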
| 336 |
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    # 0 marks an open cell, 1 marks a blocked cell.
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
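# Usage sketch: 0 marks an open cell, 1 marks a wall. A route exists from the
# top-left to the bottom-right corner of this sample, so solve_maze prints the
# 0/1 path matrix and returns True.
# sample = [
#     [0, 1, 0, 1, 1],
#     [0, 0, 0, 0, 0],
#     [1, 0, 1, 0, 1],
#     [0, 0, 1, 0, 0],
#     [1, 0, 0, 1, 0],
# ]
# solve_maze(sample)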
| 336 | 1 |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 336 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''' )
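# Minimal inference sketch (assumption): querying the same public checkpoint
# directly for a top-1 ImageNet label, outside the test harness.
# processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
# model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])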
| 336 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCAmelCase :
@staticmethod
def __magic_name__ ( *__A : Optional[int], **__A : int ):
pass
@is_pipeline_test
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __magic_name__ ( self : Any, __A : Dict, __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = pipeline(
'''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
UpperCAmelCase : List[str] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __magic_name__ ( self : Dict, __A : int, __A : List[Any] ):
UpperCAmelCase : Union[str, Any] = object_detector(examples[0], threshold=0.0 )
UpperCAmelCase : str = len(__A )
self.assertGreater(__A, 0 )
self.assertEqual(
__A, [
{
'''score''': ANY(__A ),
'''label''': ANY(__A ),
'''box''': {'''xmin''': ANY(__A ), '''ymin''': ANY(__A ), '''xmax''': ANY(__A ), '''ymax''': ANY(__A )},
}
for i in range(__A )
], )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __magic_name__ ( self : Any ):
pass
@require_torch
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = pipeline(
'''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
UpperCAmelCase : List[str] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=0.6_4, )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
], )
UpperCAmelCase : Optional[int] = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
], threshold=0.6_4, )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [
[
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
], )
@require_torch
@slow
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Dict = pipeline('''zero-shot-object-detection''' )
UpperCAmelCase : Optional[int] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
], )
UpperCAmelCase : Optional[Any] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
], )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
], )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __magic_name__ ( self : Union[str, Any] ):
pass
@require_torch
@slow
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[int] = 0.2
UpperCAmelCase : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
UpperCAmelCase : Tuple = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=__A, )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
], )
@require_torch
@slow
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[str] = 2
UpperCAmelCase : int = pipeline('''zero-shot-object-detection''' )
UpperCAmelCase : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], top_k=__A, )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
], )
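# Hedged usage sketch of the pipeline exercised by the tests above. With no model
# argument the task resolves to a default OWL-ViT checkpoint; network access and
# the threshold value here are illustrative assumptions, not part of the test suite.
from transformers import pipeline

detector = pipeline('''zero-shot-object-detection''')
predictions = detector(
    '''http://images.cocodataset.org/val2017/000000039769.jpg''',
    candidate_labels=['''cat''', '''remote''', '''couch'''],
    threshold=0.2,
)
for pred in predictions:
    print(pred['''label'''], round(pred['''score'''], 4), pred['''box'''])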
| 336 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
UpperCAmelCase : str = [randint(-1_000 , 1_000 ) for i in range(10 )]
UpperCAmelCase : Any = randint(-5_000 , 5_000 )
return (arr, r)
_lowerCamelCase : Any = make_dataset()
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, ...]:
for triplet in permutations(UpperCAmelCase , 3 ):
if sum(UpperCAmelCase ) == target:
return tuple(sorted(UpperCAmelCase ) )
return (0, 0, 0)
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, int, int]:
arr.sort()
UpperCAmelCase : Tuple = len(UpperCAmelCase )
for i in range(n - 1 ):
UpperCAmelCase , UpperCAmelCase : int = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
UpperCAmelCase : Tuple = '''
triplet_sum1(*dataset)
'''
UpperCAmelCase : List[str] = '''
triplet_sum2(*dataset)
'''
UpperCAmelCase : Tuple = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
UpperCAmelCase : str = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
return (min(UpperCAmelCase ), min(UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : int = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_lowerCamelCase : Optional[int] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_lowerCamelCase : Union[str, Any] = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
@lru_cache()
def a__ ( ) -> List[Any]:
UpperCAmelCase : Any = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
UpperCAmelCase : Dict = bs[:]
UpperCAmelCase : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase : List[Any] = [chr(UpperCAmelCase ) for n in cs]
return dict(zip(UpperCAmelCase , UpperCAmelCase ) )
def a__ ( UpperCAmelCase : List[Any] ) -> int:
UpperCAmelCase : int = set()
UpperCAmelCase : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase : Tuple = char
return pairs
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : str, __A : int, __A : Tuple, __A : Tuple="replace", __A : Any="<s>", __A : Dict="</s>", __A : Union[str, Any]="</s>", __A : Dict="<s>", __A : Any="<unk>", __A : int="<pad>", __A : List[Any]="<mask>", __A : Dict=False, **__A : Any, ):
UpperCAmelCase : str = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else bos_token
UpperCAmelCase : Optional[Any] = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else eos_token
UpperCAmelCase : int = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else sep_token
UpperCAmelCase : Union[str, Any] = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else cls_token
UpperCAmelCase : Optional[int] = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else unk_token
UpperCAmelCase : str = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase : List[Any] = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else mask_token
super().__init__(
errors=__A, bos_token=__A, eos_token=__A, unk_token=__A, sep_token=__A, cls_token=__A, pad_token=__A, mask_token=__A, add_prefix_space=__A, **__A, )
with open(__A, encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase : List[Any] = json.load(__A )
UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase : Tuple = errors # how to handle errors in decoding
UpperCAmelCase : Tuple = bytes_to_unicode()
UpperCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A, encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase : Any = merges_handle.read().split('''\n''' )[1:-1]
UpperCAmelCase : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase : Optional[Any] = dict(zip(__A, range(len(__A ) ) ) )
UpperCAmelCase : List[str] = {}
UpperCAmelCase : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase : str = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __magic_name__ ( self : List[Any] ):
return len(self.encoder )
def __magic_name__ ( self : int ):
return dict(self.encoder, **self.added_tokens_encoder )
def __magic_name__ ( self : Any, __A : Union[str, Any] ):
if token in self.cache:
return self.cache[token]
UpperCAmelCase : int = tuple(__A )
UpperCAmelCase : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
UpperCAmelCase : Dict = min(__A, key=lambda __A : self.bpe_ranks.get(__A, float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase : str = bigram
UpperCAmelCase : str = []
UpperCAmelCase : str = 0
while i < len(__A ):
try:
UpperCAmelCase : Union[str, Any] = word.index(__A, __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase : List[str] = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase : List[Any] = tuple(__A )
UpperCAmelCase : Optional[Any] = new_word
if len(__A ) == 1:
break
else:
UpperCAmelCase : int = get_pairs(__A )
UpperCAmelCase : List[str] = ''' '''.join(__A )
UpperCAmelCase : List[Any] = word
return word
def __magic_name__ ( self : Dict, __A : int ):
UpperCAmelCase : Any = []
for token in re.findall(self.pat, __A ):
UpperCAmelCase : Optional[int] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(''' ''' ) )
return bpe_tokens
def __magic_name__ ( self : Any, __A : Optional[Any] ):
return self.encoder.get(__A, self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : List[str], __A : List[Any] ):
return self.decoder.get(__A )
def __magic_name__ ( self : int, __A : List[str] ):
UpperCAmelCase : Dict = ''''''.join(__A )
UpperCAmelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
return text
def __magic_name__ ( self : Optional[int], __A : str, __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : str = os.path.join(
__A, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase : Dict = os.path.join(
__A, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__A, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=__A, ensure_ascii=__A ) + '''\n''' )
UpperCAmelCase : Dict = 0
with open(__A, '''w''', encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
UpperCAmelCase : Optional[Any] = token_index
writer.write(''' '''.join(__A ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self : str, __A : List[int], __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
UpperCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self : Any, __A : List[int], __A : Optional[List[int]] = None, __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A, token_ids_a=__A, already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def __magic_name__ ( self : List[Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self : Tuple, __A : Union[str, Any], __A : Optional[int]=False, **__A : List[Any] ):
UpperCAmelCase : Optional[Any] = kwargs.pop('''add_prefix_space''', self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
UpperCAmelCase : Optional[Any] = ''' ''' + text
return (text, kwargs)
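# A runnable sketch of the byte-level trick implemented by bytes_to_unicode above:
# every one of the 256 byte values gets a printable unicode stand-in so BPE can
# merge over text without ever handling raw whitespace/control bytes. Identifier
# names below are descriptive assumptions.
def byte_to_unicode_sketch() -> dict[int, str]:
    printable = (
        list(range(ord('''!'''), ord('''~''') + 1))
        + list(range(ord('''¡'''), ord('''¬''') + 1))
        + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    codepoints = printable[:]
    shift = 0
    for byte in range(2**8):
        if byte not in printable:
            printable.append(byte)
            codepoints.append(2**8 + shift)  # park non-printable bytes above the byte range
            shift += 1
    return dict(zip(printable, (chr(c) for c in codepoints)))

table = byte_to_unicode_sketch()
assert len(table) == 256 and table[ord('''A''')] == '''A''' and table[ord(''' ''')] == '''Ġ'''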
| 336 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
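# Hedged usage sketch for the queue-backed streamer defined above (the class
# transformers ships as TextIteratorStreamer): generate() runs on a worker thread
# and pushes decoded chunks into the queue while the caller iterates on the main
# thread. The gpt2 checkpoint and prompt are illustrative assumptions.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained('''gpt2''')
model = AutoModelForCausalLM.from_pretrained('''gpt2''')
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
inputs = tokenizer('''Streaming is''', return_tensors='''pt''')
Thread(target=model.generate, kwargs={**inputs, '''streamer''': streamer, '''max_new_tokens''': 16}).start()
for chunk in streamer:  # blocks on Queue.get() until the next chunk or the stop signal
    print(chunk, end='''''', flush=True)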
| 336 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( nn.Module ):
UpperCamelCase = 42
UpperCamelCase = (1_6, 3_2, 9_6, 2_5_6)
UpperCamelCase = jnp.floataa
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Any = nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
UpperCAmelCase : int = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCAmelCase : Dict = self.block_out_channels[i]
UpperCAmelCase : List[str] = self.block_out_channels[i + 1]
UpperCAmelCase : Union[str, Any] = nn.Conv(
__A, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(__A )
UpperCAmelCase : Tuple = nn.Conv(
__A, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(__A )
UpperCAmelCase : Optional[Any] = blocks
UpperCAmelCase : List[str] = nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self : List[Any], __A : Dict ):
UpperCAmelCase : List[str] = self.conv_in(__A )
UpperCAmelCase : Dict = nn.silu(__A )
for block in self.blocks:
UpperCAmelCase : str = block(__A )
UpperCAmelCase : Tuple = nn.silu(__A )
UpperCAmelCase : Any = self.conv_out(__A )
return embedding
@flax_register_to_config
class __UpperCAmelCase ( nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase = 3_2
UpperCamelCase = 4
UpperCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase = False
UpperCamelCase = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
UpperCamelCase = 2
UpperCamelCase = 8
UpperCamelCase = None
UpperCamelCase = 1_2_8_0
UpperCamelCase = 0.0
UpperCamelCase = False
UpperCamelCase = jnp.floataa
UpperCamelCase = True
UpperCamelCase = 0
UpperCamelCase = "rgb"
UpperCamelCase = (1_6, 3_2, 9_6, 2_5_6)
def __magic_name__ ( self : Dict, __A : jax.random.KeyArray ):
# init input tensors
UpperCAmelCase : str = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCAmelCase : str = jnp.zeros(__A, dtype=jnp.floataa )
UpperCAmelCase : List[str] = jnp.ones((1,), dtype=jnp.intaa )
UpperCAmelCase : List[str] = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
UpperCAmelCase : Optional[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCAmelCase : Optional[int] = jnp.zeros(__A, dtype=jnp.floataa )
UpperCAmelCase , UpperCAmelCase : List[Any] = jax.random.split(__A )
UpperCAmelCase : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__A, __A, __A, __A, __A )["params"]
def __magic_name__ ( self : int ):
UpperCAmelCase : str = self.block_out_channels
UpperCAmelCase : Dict = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase : Dict = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase : str = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
UpperCAmelCase : List[str] = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
UpperCAmelCase : Dict = FlaxTimestepEmbedding(__A, dtype=self.dtype )
UpperCAmelCase : Union[str, Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
UpperCAmelCase : Dict = self.only_cross_attention
if isinstance(__A, __A ):
UpperCAmelCase : Dict = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__A, __A ):
UpperCAmelCase : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Union[str, Any] = block_out_channels[0]
UpperCAmelCase : List[str] = nn.Conv(
__A, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__A )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCAmelCase : str = output_channel
UpperCAmelCase : Optional[Any] = block_out_channels[i]
UpperCAmelCase : Dict = i == len(__A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=__A, out_channels=__A, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
UpperCAmelCase : List[str] = FlaxDownBlockaD(
in_channels=__A, out_channels=__A, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(__A )
for _ in range(self.layers_per_block ):
UpperCAmelCase : Optional[int] = nn.Conv(
__A, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__A )
if not is_final_block:
UpperCAmelCase : List[str] = nn.Conv(
__A, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__A )
UpperCAmelCase : Dict = down_blocks
UpperCAmelCase : str = controlnet_down_blocks
# mid
UpperCAmelCase : Optional[Any] = block_out_channels[-1]
UpperCAmelCase : Tuple = FlaxUNetMidBlockaDCrossAttn(
in_channels=__A, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
UpperCAmelCase : int = nn.Conv(
__A, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self : Dict, __A : Any, __A : int, __A : int, __A : Optional[Any], __A : float = 1.0, __A : bool = True, __A : bool = False, ):
UpperCAmelCase : Optional[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCAmelCase : str = jnp.flip(__A, axis=1 )
# 1. time
if not isinstance(__A, jnp.ndarray ):
UpperCAmelCase : int = jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(__A, jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCAmelCase : str = timesteps.astype(dtype=jnp.floataa )
UpperCAmelCase : List[str] = jnp.expand_dims(__A, 0 )
UpperCAmelCase : Dict = self.time_proj(__A )
UpperCAmelCase : Dict = self.time_embedding(__A )
# 2. pre-process
UpperCAmelCase : Dict = jnp.transpose(__A, (0, 2, 3, 1) )
UpperCAmelCase : List[Any] = self.conv_in(__A )
UpperCAmelCase : str = jnp.transpose(__A, (0, 2, 3, 1) )
UpperCAmelCase : Optional[Any] = self.controlnet_cond_embedding(__A )
sample += controlnet_cond
# 3. down
UpperCAmelCase : List[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__A, __A ):
UpperCAmelCase , UpperCAmelCase : str = down_block(__A, __A, __A, deterministic=not train )
else:
UpperCAmelCase , UpperCAmelCase : Tuple = down_block(__A, __A, deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCAmelCase : List[str] = self.mid_block(__A, __A, __A, deterministic=not train )
        # 5. controlnet blocks
UpperCAmelCase : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(__A, self.controlnet_down_blocks ):
UpperCAmelCase : str = controlnet_block(__A )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase : Any = controlnet_down_block_res_samples
UpperCAmelCase : Union[str, Any] = self.controlnet_mid_block(__A )
# 6. scaling
UpperCAmelCase : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__A, mid_block_res_sample=__A )
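# A tiny runnable sketch of the "zero convolution" pattern used by every
# controlnet_down_blocks entry above: 1x1 convs whose kernel and bias start at
# zero, so the control branch contributes nothing at initialization and its
# influence is learned gradually. Shapes below are illustrative assumptions.
import jax
import jax.numpy as jnp
import flax.linen as nn

zero_conv = nn.Conv(
    4, kernel_size=(1, 1), padding='''VALID''',
    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(),
)
x = jnp.ones((1, 8, 8, 4))  # NHWC, matching the transposes done in __call__ above
params = zero_conv.init(jax.random.PRNGKey(0), x)
assert jnp.allclose(zero_conv.apply(params, x), 0.0)  # exact no-op at init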
| 336 |
import numpy
# List of input, output pairs
_lowerCamelCase : Dict = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
_lowerCamelCase : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
_lowerCamelCase : Dict = [2, 4, 1, 5]
_lowerCamelCase : Dict = len(train_data)
_lowerCamelCase : int = 0.0_0_9
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]="train" ) -> Dict:
return calculate_hypothesis_value(UpperCAmelCase , UpperCAmelCase ) - output(
UpperCAmelCase , UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Any:
UpperCAmelCase : str = 0
for i in range(len(UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ) -> List[str]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : str=m ) -> Dict:
UpperCAmelCase : Optional[int] = 0
for i in range(UpperCAmelCase ):
if index == -1:
summation_value += _error(UpperCAmelCase )
else:
summation_value += _error(UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def a__ ( UpperCAmelCase : Dict ) -> Dict:
UpperCAmelCase : Dict = summation_of_cost_derivative(UpperCAmelCase , UpperCAmelCase ) / m
return cost_derivative_value
def a__ ( ) -> List[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase : List[str] = 0.000002
UpperCAmelCase : Any = 0
UpperCAmelCase : Dict = 0
while True:
j += 1
UpperCAmelCase : List[Any] = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase ) ):
UpperCAmelCase : List[str] = get_cost_derivative(i - 1 )
UpperCAmelCase : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase , rtol=UpperCAmelCase , ):
break
UpperCAmelCase : int = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ) -> List[Any]:
for i in range(len(UpperCAmelCase ) ):
print(('''Actual output value:''', output(UpperCAmelCase , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(UpperCAmelCase , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
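# A minimal runnable sketch of one batch-gradient-descent update for the linear
# hypothesis h(x) = theta0 + theta1 * x optimized above, on assumed toy data
# generated from y = 2x + 1.
data = [((2.0,), 5.0), ((4.0,), 9.0)]
theta = [0.0, 0.0]  # [theta0 (bias), theta1]
lr = 0.01
errors = [theta[0] + theta[1] * x - y for (x,), y in data]
grad_bias = sum(errors) / len(data)  # d(cost)/d(theta0)
grad_slope = sum(e * x for e, ((x,), _) in zip(errors, data)) / len(data)  # d(cost)/d(theta1)
theta = [theta[0] - lr * grad_bias, theta[1] - lr * grad_slope]
print(theta)  # each step moves the parameters toward theta0=1, theta1=2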
| 336 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """vit_msn"""
def __init__( self : Tuple, __A : List[str]=7_6_8, __A : List[str]=1_2, __A : Dict=1_2, __A : Any=3_0_7_2, __A : Union[str, Any]="gelu", __A : Optional[Any]=0.0, __A : Tuple=0.0, __A : Tuple=0.0_2, __A : Optional[int]=1E-06, __A : Optional[Any]=2_2_4, __A : Optional[Any]=1_6, __A : int=3, __A : Union[str, Any]=True, **__A : int, ):
super().__init__(**__A )
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : Optional[int] = layer_norm_eps
UpperCAmelCase : Optional[int] = image_size
UpperCAmelCase : Tuple = patch_size
UpperCAmelCase : str = num_channels
UpperCAmelCase : List[str] = qkv_bias
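# Hedged usage sketch: transformers publishes this configuration class as
# ViTMSNConfig; constructing it with no arguments reproduces the defaults wired
# into the signature above.
from transformers import ViTMSNConfig

config = ViTMSNConfig()
assert (config.hidden_size, config.num_hidden_layers, config.patch_size) == (768, 12, 16)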
| 336 |
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = len(UpperCAmelCase ) - 1
while left <= right:
        # avoid division by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
UpperCAmelCase : Optional[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase : Any = left
UpperCAmelCase : List[str] = point
elif point > right:
UpperCAmelCase : Any = right
UpperCAmelCase : List[str] = point
else:
if item < current_item:
UpperCAmelCase : Optional[int] = point - 1
else:
UpperCAmelCase : str = point + 1
return None
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
    # avoid division by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif point > right:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , point + 1 , UpperCAmelCase )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> int:
if collection != sorted(UpperCAmelCase ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
_lowerCamelCase : Optional[int] = 0
if debug == 1:
_lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCamelCase : List[Any] = 6_7
_lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336 | 1 |
def a__ ( UpperCAmelCase : int = 4_000_000 ) -> int:
UpperCAmelCase : int = [0, 1]
UpperCAmelCase : int = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
UpperCAmelCase : str = 0
for j in range(len(UpperCAmelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 336 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else ''''''
UpperCAmelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any:
for i in range(config.num_hidden_layers ):
UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : str = q_bias
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
UpperCAmelCase : str = gamma_a
UpperCAmelCase : Dict = gamma_a
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : str = val
def a__ ( ) -> Optional[int]:
UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]:
UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase : List[Any] = 1_024
UpperCAmelCase : Optional[Any] = 4_096
UpperCAmelCase : Any = 24
UpperCAmelCase : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 16
UpperCAmelCase : List[Any] = '''huggingface/label-files'''
UpperCAmelCase : Any = '''rvlcdip-id2label.json'''
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = idalabel
UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
UpperCAmelCase : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase : str = encoding['''pixel_values''']
UpperCAmelCase : Any = model(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify logits
UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
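# Illustrative invocation of the conversion entry point above (the script filename
# is an assumption; the arguments match the argparse definition):
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub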
| 336 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_lowerCamelCase : int = logging.get_logger(__name__)
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, *__A : Any, **__A : List[str] ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''', __A, )
super().__init__(*__A, **__A )
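# The shim above only warns and forwards to DeiTImageProcessor; a hedged sketch of
# the replacement call that new code should make instead (checkpoint name is an
# illustrative assumption):
from transformers import DeiTImageProcessor

image_processor = DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''')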
| 336 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 1 |
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase : Union[str, Any] = _modexpt(UpperCAmelCase , exponent // 2 , UpperCAmelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCAmelCase , exponent - 1 , UpperCAmelCase )) % modulo_value
def a__ ( UpperCAmelCase : int = 1_777 , UpperCAmelCase : int = 1_855 , UpperCAmelCase : int = 8 ) -> int:
UpperCAmelCase : int = base
for _ in range(1 , UpperCAmelCase ):
UpperCAmelCase : Any = _modexpt(UpperCAmelCase , UpperCAmelCase , 10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 336 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase : List[Any] = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def a__ ( UpperCAmelCase : List[str] ) -> Any:
with open(UpperCAmelCase , '''r''' ) as f:
UpperCAmelCase : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
super().__init__(**__A )
UpperCAmelCase : Tuple = load_vocab_file(__A )
UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Union[str, Any] = unk_token
UpperCAmelCase : Optional[Any] = cls_token
UpperCAmelCase : Optional[int] = pad_token
UpperCAmelCase : Optional[int] = mask_token
UpperCAmelCase : List[str] = eos_token
UpperCAmelCase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __magic_name__ ( self : Tuple, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : List[Any], __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
return text.split()
def __magic_name__ ( self : Optional[int], __A : Dict=False ):
return len(self._id_to_token )
def __magic_name__ ( self : int ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __magic_name__ ( self : Tuple, __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
mask += [0] * len(__A ) + [1]
return mask
def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__A, '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __magic_name__ ( self : Dict ):
return self.get_vocab_size(with_added_tokens=__A )
def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
return super()._add_tokens(__A, special_tokens=__A )
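# Usage sketch for the tokenizer class above (EsmTokenizer upstream; the vocab
# path is hypothetical). Input text is split on whitespace by _tokenize().
#
#     tok = EsmTokenizer(vocab_file="vocab.txt")
#     ids = tok.convert_tokens_to_ids(["<cls>", "M", "K", "T", "<eos>"])
#     tok.convert_ids_to_tokens(ids)  # -> ["<cls>", "M", "K", "T", "<eos>"]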
| 336 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
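# The pattern above swaps this module for a _LazyModule proxy in sys.modules, so
# the torch-backed classes are only imported on first attribute access. A
# stand-alone sketch of the same idea (simplified, not the transformers class):
#
#     import importlib, sys, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._map = {cls: mod for mod, classes in import_structure.items() for cls in classes}
#
#         def __getattr__(self, name):
#             submodule = importlib.import_module("." + self._map[name], self.__name__)
#             return getattr(submodule, name)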
| 336 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
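# The spatial assertions in the tests above all reduce to one rule: each feature
# map is image_size // output_stride pixels per side (illustrative values):
#
#     image_size, output_stride = 32, 8
#     assert image_size // output_stride == 4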
| 336 | 1 |
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 336 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
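# Shape sketch for the dummy cache built above: each past_key_values entry is a
# (key, value) pair of zeros shaped (batch, n_head, seqlen + 2, n_embd // n_head).
# With this file's defaults (n_head=16, n_embd=4096), batch=2 and seqlen=5:
#
#     (2, 16, 7, 4096 // 16)  ==  (2, 16, 7, 256)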
| 336 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase : List[Any] = '''xvjiarui/stable-diffusion-2-inpainting'''
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionInpaintPipeline.from_pretrained(__A, safety_checker=__A )
UpperCAmelCase : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase : Any = 5_0
UpperCAmelCase : int = jax.device_count()
UpperCAmelCase : Tuple = num_samples * [prompt]
UpperCAmelCase : int = num_samples * [init_image]
UpperCAmelCase : Any = num_samples * [mask_image]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = pipeline.prepare_inputs(__A, __A, __A )
# shard inputs and rng
UpperCAmelCase : int = replicate(__A )
UpperCAmelCase : List[Any] = jax.random.split(__A, jax.device_count() )
UpperCAmelCase : Optional[Any] = shard(__A )
UpperCAmelCase : Tuple = shard(__A )
UpperCAmelCase : int = shard(__A )
UpperCAmelCase : int = pipeline(
__A, __A, __A, __A, __A, __A, jit=__A )
UpperCAmelCase : Any = output.images.reshape(__A, 5_1_2, 5_1_2, 3 )
UpperCAmelCase : Union[str, Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCAmelCase : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase : Dict = jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
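# Data-layout sketch for the replicate()/shard() calls above: jax.pmap expects a
# leading device axis on every input (illustrative, any array shape works):
#
#     import jax, jax.numpy as jnp
#     x = jnp.ones((jax.device_count() * 2, 4))
#     x = x.reshape(jax.device_count(), -1, 4)  # (devices, batch_per_device, ...)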
| 336 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 1 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 336 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
    def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
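# Endpoint check for the geometric schedule built above: it interpolates between
# the configured extremes on a log scale.
#
#     i = 0                        -> sigma_max**2
#     i = num_inference_steps - 1  -> sigma_max**2 * (sigma_min**2 / sigma_max**2) == sigma_min**2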
| 336 | 1 |
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_test = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_test)
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
_lowerCamelCase : int = parser.parse_args()
main(args.correct_filename, args.fail_filename)
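# Input format sketch: each line of --correct_filename must hold the four
# ";"-separated fields consumed by line.split(";") above, e.g. (path hypothetical):
#
#     tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([...])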
| 336 |
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
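# Usage sketch for the context manager above: the cursor is hidden for the body
# of the with-block and restored even if the body raises.
#
#     with hide():
#         run_menu()  # hypothetical caller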
| 336 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
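# Invocation sketch (script filename and paths hypothetical):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade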
| 336 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = ["""image_processor""", """tokenizer"""]
UpperCamelCase = """BlipImageProcessor"""
UpperCamelCase = """AutoTokenizer"""
def __init__( self : Any, __A : Union[str, Any], __A : Optional[int], __A : Optional[int] ):
super().__init__(__A, __A )
# add QFormer tokenizer
UpperCAmelCase : Any = qformer_tokenizer
def __call__( self : str, __A : ImageInput = None, __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, __A : bool = True, __A : Union[bool, str, PaddingStrategy] = False, __A : Union[bool, str, TruncationStrategy] = None, __A : Optional[int] = None, __A : int = 0, __A : Optional[int] = None, __A : Optional[bool] = None, __A : bool = False, __A : bool = False, __A : bool = False, __A : bool = False, __A : bool = False, __A : bool = True, __A : Optional[Union[str, TensorType]] = None, **__A : str, ):
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
UpperCAmelCase : Union[str, Any] = BatchFeature()
if text is not None:
UpperCAmelCase : Optional[Any] = self.tokenizer(
text=__A, add_special_tokens=__A, padding=__A, truncation=__A, max_length=__A, stride=__A, pad_to_multiple_of=__A, return_attention_mask=__A, return_overflowing_tokens=__A, return_special_tokens_mask=__A, return_offsets_mapping=__A, return_token_type_ids=__A, return_length=__A, verbose=__A, return_tensors=__A, **__A, )
encoding.update(__A )
UpperCAmelCase : int = self.qformer_tokenizer(
text=__A, add_special_tokens=__A, padding=__A, truncation=__A, max_length=__A, stride=__A, pad_to_multiple_of=__A, return_attention_mask=__A, return_overflowing_tokens=__A, return_special_tokens_mask=__A, return_offsets_mapping=__A, return_token_type_ids=__A, return_length=__A, verbose=__A, return_tensors=__A, **__A, )
UpperCAmelCase : Tuple = qformer_text_encoding.pop('''input_ids''' )
UpperCAmelCase : Dict = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
UpperCAmelCase : int = self.image_processor(__A, return_tensors=__A )
encoding.update(__A )
return encoding
def __magic_name__ ( self : Any, *__A : Any, **__A : Dict ):
return self.tokenizer.batch_decode(*__A, **__A )
def __magic_name__ ( self : Optional[int], *__A : Tuple, **__A : Optional[Any] ):
return self.tokenizer.decode(*__A, **__A )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[int] = self.tokenizer.model_input_names
UpperCAmelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __magic_name__ ( self : Optional[int], __A : int, **__A : Tuple ):
if os.path.isfile(__A ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__A, exist_ok=__A )
UpperCAmelCase : Dict = os.path.join(__A, '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(__A )
return super().save_pretrained(__A, **__A )
@classmethod
def __magic_name__ ( cls : int, __A : str, **__A : List[Any] ):
UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(__A, subfolder='''qformer_tokenizer''' )
UpperCAmelCase : Tuple = cls._get_arguments_from_pretrained(__A, **__A )
args.append(__A )
return cls(*__A )
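# Usage sketch for the processor above (InstructBLIP-style; the `processor` and
# `image` objects are hypothetical): one call returns text, Q-Former and image
# features together.
#
#     enc = processor(images=image, text="What is shown?", return_tensors="pt")
#     # keys: input_ids, attention_mask, qformer_input_ids,
#     #       qformer_attention_mask, pixel_values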
| 336 |
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
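# Worked examples (illustrative): the last partition absorbs any remainder.
#
#     allocation_num(100, 4)  # -> ["1-25", "26-50", "51-75", "76-100"]
#     allocation_num(10, 3)   # -> ["1-3", "4-6", "7-10"]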
| 336 | 1 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
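# Worked example (illustrative): for number = -5, bin(-5)[3:] == "101" gives a
# 3-bit budget; abs(-5) - 2**3 == -3 and bin(-3)[3:] == "11", so the result is
# "0b" + "1" + "0" * (3 - 2) + "11" == "0b1011" (4-bit two's complement of -5).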
| 336 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
    print('''Successfully set up pod.''' )
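# Hypothetical invocation (flag names taken from the parser above, values are
# placeholders):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug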
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
_lowerCamelCase : int = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCAmelCase : List[str] = '''lm_head'''
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
UpperCAmelCase : List[str] = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
UpperCAmelCase : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase : List[str] = value
elif weight_type == "weight_g":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : int = value
elif weight_type == "bias":
UpperCAmelCase : Optional[int] = value
else:
UpperCAmelCase : Dict = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : Dict = []
UpperCAmelCase : Dict = fairseq_model.state_dict()
UpperCAmelCase : Dict = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase : Tuple = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase : List[Any] = name.split(UpperCAmelCase )[0].split('''.''' )[-2]
UpperCAmelCase : Optional[Any] = mapped_key.replace('''*''' , UpperCAmelCase )
if "weight_g" in name:
UpperCAmelCase : str = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase : int = '''weight_v'''
elif "bias" in name:
UpperCAmelCase : List[str] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : Union[str, Any] = '''weight'''
else:
UpperCAmelCase : Union[str, Any] = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : int ) -> Dict:
UpperCAmelCase : str = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase : Optional[Any] = name.split('''.''' )
UpperCAmelCase : List[Any] = int(items[0] )
UpperCAmelCase : Optional[Any] = int(items[1] )
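    # type_id 0 maps the conv weight/bias of each feature-extractor layer;
    # type_id 2 maps the norm parameters, which only exist on layer 0 when
    # group norm is used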
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase : Any = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase : Any = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase : Optional[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def a__ ( UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Any=True ) -> Union[str, Any]:
if config_path is not None:
UpperCAmelCase : Union[str, Any] = UniSpeechConfig.from_pretrained(UpperCAmelCase )
else:
UpperCAmelCase : Optional[Any] = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase : str = Dictionary.load_from_json(UpperCAmelCase )
            # important: change the bos & pad token ids, since the CTC symbol is
            # <pad> and not <s> as in fairseq
UpperCAmelCase : List[str] = target_dict.pad_index
UpperCAmelCase : str = target_dict.bos_index
UpperCAmelCase : str = target_dict.eos_index
UpperCAmelCase : List[Any] = len(target_dict.symbols )
UpperCAmelCase : int = os.path.join(UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
UpperCAmelCase : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : int = 42
UpperCAmelCase : List[str] = 43
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
UpperCAmelCase : List[Any] = WavaVecaPhonemeCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=UpperCAmelCase , )
UpperCAmelCase : Any = True if config.feat_extract_norm == '''layer''' else False
UpperCAmelCase : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
UpperCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
UpperCAmelCase : Tuple = UniSpeechForCTC(UpperCAmelCase )
else:
UpperCAmelCase : Optional[int] = UniSpeechForPreTraining(UpperCAmelCase )
if is_finetuned:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase : Optional[Any] = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
hf_unispeech.save_pretrained(UpperCAmelCase )
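# Hypothetical invocation (argument names taken from the parser below, paths are
# placeholders):
#   python this_script.py --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf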
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_lowerCamelCase : Dict = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 336 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
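# create_rename_keys (below) walks every key of the original state dict and maps it
# onto the Hugging Face naming scheme; each `if` handles one family of module names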
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
    # remove and rename some keys to load the original model
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
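# Hypothetical invocation (task and flag names taken from the parser below, paths
# are placeholders):
#   python this_script.py --task imagenet1k_256 --orig_checkpoint_path mobilevitv2.pt \
#       --orig_config_path config.yaml --pytorch_dump_folder_path ./mobilevitv2-hf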
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 336 | 1 |
def a__ ( UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Any:
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod
else:
        UpperCAmelCase : Tuple = binary_exponentiation(UpperCAmelCase , n // 2 , UpperCAmelCase )  # floor division keeps the exponent an int
return (b * b) % mod
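# Example: binary_exponentiation(a, n, mod) as defined above computes a**n % mod,
# e.g. binary_exponentiation(2, 10, 1000) == 24. The demo below relies on Fermat's
# little theorem: for prime p, b**(p - 2) % p is the modular inverse of b, hence
# (a / b) % p == (a * b**(p - 2)) % p whenever b divides a exactly.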
# a prime number
_lowerCamelCase : Tuple = 7_0_1
_lowerCamelCase : int = 1_0_0_0_0_0_0_0_0_0
_lowerCamelCase : Union[str, Any] = 1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 336 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
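    # Populates the dict backing store from the dataclass fields: a lone dict or
    # (key, value) iterator passed as the first field is unpacked into entries,
    # otherwise every non-None field is stored under its own name.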
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
        # Will raise a KeyError if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
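# Minimal usage sketch for the context-manager stack above (hypothetical file
# names; the obfuscated class simply enters every manager in the given list):
#
#     with stack_cls([open('''a.txt'''), open('''b.txt''')]):
#         ...  # both files are entered on __enter__ and unwound on __exit__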
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
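# The next helper flattens a nested mapping into a single level, joining keys with
# the delimiter, e.g. {"a": {"b": 1}} with delimiter "." becomes {"a.b": 1}.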
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ):
for k, v in d.items():
UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k
if v and isinstance(UpperCAmelCase , UpperCAmelCase ):
yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
        raise ValueError(f'''Type not supported for size: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__A, '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__A, '''num_encoder_blocks''' ) )
class __UpperCAmelCase :
def __init__( self : Optional[Any], __A : Dict, __A : List[Any]=1_3, __A : int=6_4, __A : Union[str, Any]=3, __A : str=4, __A : Optional[Any]=[2, 2, 2, 2], __A : str=[8, 4, 2, 1], __A : Dict=[1_6, 3_2, 6_4, 1_2_8], __A : Union[str, Any]=[1, 4, 8, 1_6], __A : Optional[Any]=[1, 2, 4, 8], __A : Tuple=True, __A : str=True, __A : Optional[Any]="gelu", __A : List[str]=0.1, __A : Union[str, Any]=0.1, __A : int=0.0_2, __A : str=3, __A : Dict=None, ):
UpperCAmelCase : Dict = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : Tuple = image_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : List[Any] = num_encoder_blocks
UpperCAmelCase : Any = sr_ratios
UpperCAmelCase : Optional[Any] = depths
UpperCAmelCase : List[str] = hidden_sizes
UpperCAmelCase : Optional[int] = downsampling_rates
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : int = is_training
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Optional[int] = num_labels
UpperCAmelCase : Optional[int] = scope
def __magic_name__ ( self : int ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Optional[Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Dict ):
return SegformerConfig(
image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : Optional[int], __A : Dict, __A : Tuple, __A : Tuple ):
UpperCAmelCase : int = SegformerModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Any = model(__A )
UpperCAmelCase : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __magic_name__ ( self : Optional[Any], __A : Dict, __A : List[str], __A : Union[str, Any] ):
UpperCAmelCase : Union[str, Any] = self.num_labels
UpperCAmelCase : Tuple = SegformerForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : List[str] = model(__A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCAmelCase : Union[str, Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss, 0.0 )
def __magic_name__ ( self : str, __A : List[Any], __A : Dict, __A : List[str] ):
UpperCAmelCase : Dict = 1
UpperCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : List[str] = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size) ).to(__A )
UpperCAmelCase : Any = model(__A, labels=__A )
self.parent.assertGreater(result.loss, 0.0 )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs
UpperCAmelCase : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : int ):
UpperCAmelCase : List[Any] = SegformerModelTester(self )
UpperCAmelCase : Union[str, Any] = SegformerConfigTester(self, config_class=__A )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__A )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def __magic_name__ ( self : Union[str, Any] ):
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def __magic_name__ ( self : Tuple ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[str] = [*signature.parameters.keys()]
UpperCAmelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[int] = True
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[str] = False
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : List[str] = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Dict = outputs.attentions
UpperCAmelCase : str = sum(self.model_tester.depths )
self.assertEqual(len(__A ), __A )
            # check that output_attentions also works using config
del inputs_dict["output_attentions"]
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : int = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[int] = outputs.attentions
self.assertEqual(len(__A ), __A )
# verify the first attentions (first block, first layer)
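            # block 1 downsamples the image by 4, so its query length is (H/4)**2;
            # spatial-reduction attention shrinks the key/value length by sr_ratio**2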
UpperCAmelCase : str = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
# verify the last attentions (last block, last layer)
UpperCAmelCase : str = (self.model_tester.image_size // 3_2) ** 2
UpperCAmelCase : Dict = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
UpperCAmelCase : Tuple = len(__A )
# Check attention is always last and order is fine
UpperCAmelCase : Any = True
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__A, __A ) )
self.assertEqual(out_len + 1, len(__A ) )
UpperCAmelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(__A ), __A )
# verify the first attentions (first block, first layer)
UpperCAmelCase : int = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
def __magic_name__ ( self : List[Any] ):
def check_hidden_states_output(__A : Optional[int], __A : Union[str, Any], __A : Dict ):
UpperCAmelCase : str = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : List[Any] = outputs.hidden_states
UpperCAmelCase : Any = self.model_tester.num_encoder_blocks
self.assertEqual(len(__A ), __A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Any = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ):
continue
UpperCAmelCase : Dict = model_class(__A )
model.to(__A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A, return_labels=__A )
UpperCAmelCase : Optional[Any] = model(**__A ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Dict ):
pass
@slow
def __magic_name__ ( self : Tuple ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = SegformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def __magic_name__ ( self : Dict ):
# only resize + normalize
UpperCAmelCase : int = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=__A, align=__A, do_random_crop=__A )
UpperCAmelCase : List[Any] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__A )
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
UpperCAmelCase : Any = model(__A )
UpperCAmelCase : int = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : int = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : str ):
# only resize + normalize
UpperCAmelCase : Optional[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2), keep_ratio=__A, align=__A, do_random_crop=__A )
UpperCAmelCase : Optional[int] = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__A )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Tuple = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
UpperCAmelCase : Any = model(__A )
UpperCAmelCase : str = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], __A, atol=1E-1 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(5_1_2, 5_1_2), keep_ratio=False, align=False, do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='''pt''' )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(5_0_0, 3_0_0)] )
        expected_shape = torch.Size((5_0_0, 3_0_0) )
        self.assertEqual(segmentation[0].shape, expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((1_2_8, 1_2_8) )
        self.assertEqual(segmentation[0].shape, expected_shape )
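# A minimal, self-contained sketch (not from the original test file) of what
# ``post_process_semantic_segmentation`` does conceptually: bilinearly upsample
# the coarse logits to the requested target size, then take the per-pixel
# argmax. The shapes below are illustrative assumptions.
def _post_process_sketch():
    import torch
    import torch.nn.functional as F

    logits = torch.randn(1, 1_5_0, 1_2_8, 1_2_8)  # (batch, num_labels, height, width)
    upsampled = F.interpolate(logits, size=(5_0_0, 3_0_0), mode='''bilinear''', align_corners=False)
    segmentation = upsampled.argmax(dim=1)[0]  # (5_0_0, 3_0_0) map of label ids
    assert segmentation.shape == torch.Size((5_0_0, 3_0_0))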
| 336 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self, **kwargs ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
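# A rough sketch (not part of the test suite) of the greedy longest-match-first
# WordPiece lookup that the assertions above rely on; the real tokenizer also
# handles punctuation splitting, casing and unknown characters.
def _wordpiece_sketch(word, vocab):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else '''##''' + word[start:end]
            if piece in vocab:
                tokens.append(piece)
                break
            end -= 1
        else:
            return ['''[UNK]''']
        start = end
    return tokens
# e.g. _wordpiece_sketch("unwanted", {"un", "##want", "##ed"})
# returns ['un', '##want', '##ed'], matching the test above.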
| 336 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class __UpperCAmelCase :
UpperCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(default=lowerCamelCase__ , metadata={"""help""": """Whether tp freeze the encoder."""} )
UpperCamelCase = field(default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class __UpperCAmelCase :
UpperCamelCase = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
UpperCamelCase = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
UpperCamelCase = field(
default=1_0_2_4 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=1_4_2 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
UpperCamelCase = field(
default=1_4_2 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
UpperCamelCase = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
UpperCamelCase = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
UpperCamelCase = field(default=lowerCamelCase__ , metadata={"""help""": """Source language id for translation."""} )
UpperCamelCase = field(default=lowerCamelCase__ , metadata={"""help""": """Target language id for translation."""} )
UpperCamelCase = field(default=lowerCamelCase__ , metadata={"""help""": """# num_beams to use for evaluation."""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def handle_metrics( split , metrics , output_dir ) -> None:
    logger.info(f'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(f''' {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , f'''{split}_results.json''' ) )
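# For example (illustrative values): handle_metrics("val", {"val_loss": 0.5}, "out")
# logs "val_loss = 0.5" and writes the metrics dict to out/val_results.json.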
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
check_output_dir(UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase : List[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
assert hasattr(UpperCAmelCase , UpperCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCAmelCase , UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
UpperCAmelCase : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCAmelCase : str = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCAmelCase : str = SeqaSeqDataset
# Get datasets
UpperCAmelCase : str = (
dataset_class(
UpperCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
UpperCAmelCase : Optional[int] = (
dataset_class(
UpperCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCAmelCase : int = (
dataset_class(
UpperCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCAmelCase : int = (
build_compute_metrics_fn(data_args.task , UpperCAmelCase ) if training_args.predict_with_generate else None
)
UpperCAmelCase : List[str] = SeqaSeqTrainer(
model=UpperCAmelCase , args=UpperCAmelCase , data_args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , data_collator=SeqaSeqDataCollator(
UpperCAmelCase , UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCAmelCase , tokenizer=UpperCAmelCase , )
UpperCAmelCase : List[str] = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
UpperCAmelCase : List[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCAmelCase : List[Any] = train_result.metrics
UpperCAmelCase : Any = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , UpperCAmelCase , training_args.output_dir )
all_metrics.update(UpperCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase : Optional[int] = trainer.evaluate(metric_key_prefix='''val''' )
UpperCAmelCase : List[Any] = data_args.n_val
UpperCAmelCase : Tuple = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , UpperCAmelCase , training_args.output_dir )
all_metrics.update(UpperCAmelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCAmelCase : Any = trainer.predict(test_dataset=UpperCAmelCase , metric_key_prefix='''test''' )
UpperCAmelCase : Union[str, Any] = test_output.metrics
UpperCAmelCase : int = data_args.n_test
if trainer.is_world_process_zero():
UpperCAmelCase : List[Any] = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , UpperCAmelCase , training_args.output_dir )
all_metrics.update(UpperCAmelCase )
if training_args.predict_with_generate:
UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(
            test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
UpperCAmelCase : List[Any] = lmap(str.strip , UpperCAmelCase )
write_txt_file(UpperCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(UpperCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 336 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
    def __init__( self, parent, batch_size=1_3, image_size=3_0, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=2, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=1_0, initializer_range=0.0_2, num_labels=3, mask_ratio=0.6, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
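        # Worked example with the defaults above (illustrative): image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, and with mask_ratio=0.6
        # the kept length is ceil(0.4 * (225 + 1)) = ceil(90.4) = 91.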
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
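        # e.g. with patch_size=2 and num_channels=3, each patch is reconstructed
        # from 2 * 2 * 3 = 12 predicted pixel values.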
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
        expected_num_channels = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , pixel_values , labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 | 1 |
from __future__ import annotations
def solve_maze( maze: list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved
def run_maze( maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
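# Illustrative example: with 0 = free cell and 1 = blocked cell (the convention
# implied by the ``not maze[i][j]`` check above), solve_maze([[0, 1], [0, 0]])
# prints the visited-path matrix [1, 0] / [1, 1] and returns True.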
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
def partition( m: int ) -> int:
    memo: list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
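# For example, partition(5) == 7, matching the seven partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.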
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 1 |
def hexagonal_numbers( length: int ) -> list[int]:
    if not isinstance(length , int ) or length <= 0:
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length )]
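# For example, hexagonal_numbers(5) returns [0, 1, 6, 15, 28],
# since h(n) = n * (2n - 1) for n = 0..4.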
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 336 |
from __future__ import annotations
def solve_maze( maze: list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved
def run_maze( maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
_lowerCamelCase : Optional[int] = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_lowerCamelCase : str = BASE_URL + "/user"
# https://github.com/settings/tokens
_lowerCamelCase : Any = os.environ.get("USER_TOKEN", "")
def a__ ( UpperCAmelCase : str ) -> dict[Any, Any]:
UpperCAmelCase : Any = {
'''Authorization''': f'''token {auth_token}''',
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(UpperCAmelCase , headers=UpperCAmelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 336 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
    def __init__( self, parent, batch_size=1_3, image_size=6_4, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=1_0, initializer_range=0.0_2, backbone_featmap_shape=[1, 1_6, 4, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 3_2) ** 2
        self.seq_length = num_patches + 1
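        # Worked example (illustrative): image_size=64 with an output stride of 32
        # gives a 2x2 backbone feature map, so num_patches = (64 // 32) ** 2 = 4
        # and seq_length = 4 + 1 = 5 once the [CLS] token is added.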
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''' )
| 336 | 1 |
from __future__ import annotations
def two_pointer( nums: list[int] , target: int ) -> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
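# Note (not in the original): the two-pointer scan assumes ``nums`` is sorted in
# ascending order, as in the [2, 7, 11, 15] example below; on unsorted input it
# can miss valid pairs.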
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 1_1, 1_5], 9) = }""")
| 336 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr: list[int] , target: int ) -> tuple[int, ...]:
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr: list[int] , target: int ) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))
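# Note (not in the original): triplet_sum1 tries all O(n^3) ordered triples via
# permutations, while triplet_sum2 sorts once and then uses an O(n^2)
# two-pointer scan; that gap is what the timing comparison below illustrates.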
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url: str ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name: str ) -> str:
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''patch_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''' )
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''' )
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''' )
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''' )
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''' )
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''' )
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        name = name.replace('''out_conv''' , '''projection''' )
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''' , '''residual_layer1''' )
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''' , '''residual_layer2''' )
    if "conv1" in name:
        name = name.replace('''conv1''' , '''convolution1''' )
    if "conv2" in name:
        name = name.replace('''conv2''' , '''convolution2''' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
    if "pretrained" in name:
        name = name.replace('''pretrained''' , '''dpt''' )
    if "bn" in name:
        name = name.replace('''bn''' , '''batch_norm''' )
    if "head" in name:
        name = name.replace('''head''' , '''head.head''' )
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''' , '''layernorm''' )
    if "auxlayer" in name:
        name = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
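# Illustrative check (not in the original script) of the index flip performed in
# rename_key: abs(layer_idx - 4) maps refinenet 1, 2, 3, 4 to fusion_stage
# layers 3, 2, 1, 0 respectively.
assert [abs(layer_idx - 4) for layer_idx in (1, 2, 3, 4)] == [3, 2, 1, 0]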
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
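# Minimal, self-contained sketch (with made-up shapes) of the fused-QKV split
# above: timm stores one (3 * hidden, hidden) matrix, which is cut into three
# (hidden, hidden) blocks for query, key and value.
def _split_qkv_sketch():
    import torch

    hidden_size = 4
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : 2 * hidden_size, :]
    value = in_proj_weight[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)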
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model to hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
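# Example invocation (paths and script name are illustrative; the checkpoint URL is
# simply the parser default above):
#
#     python convert_dpt_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large \
#         --model_name dpt-large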
| 336 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Decodes newly generated tokens and prints them as soon as they form whole words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining token cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. When the stream ends, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Puts the new text in the queue; on stream end, also puts the stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
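# Usage sketch (illustrative; the checkpoint name is a placeholder):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok("A poem about streaming:", return_tensors="pt")
#     streamer = TextStreamer(tok, skip_prompt=True)
#     model.generate(**inputs, streamer=streamer, max_new_tokens=20)
#
# TextIteratorStreamer is the non-blocking variant: run generate() in a background
# Thread and iterate over the streamer to consume the text chunks as they arrive.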
| 336 | 1 |
def sum_of_digits(n: int) -> int:
    # Iteratively strip and accumulate the last decimal digit.
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))
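# Quick illustrative sanity check: the digits of 262144 sum to 2+6+2+1+4+4 = 19,
# and all three implementations agree on it.
assert sum_of_digits(262_144) == sum_of_digits_recursion(262_144) == sum_of_digits_compact(262_144) == 19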
def benchmark() -> None:
    """Benchmark the three implementations on integers of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    # Difference between the hypothesis and the actual output for one example.
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    # h(x) = theta_0 + theta_1*x_1 + ... ; parameter_vector[0] is the bias term.
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
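# Worked example: with the initial parameter_vector [2, 4, 1, 5] and the first
# training input (5, 2, 3), the hypothesis is 2 + 4*5 + 1*2 + 5*3 = 39, so the
# starting error against the target output 15 is 39 - 15 = 24.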
| 336 | 1 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def __magic_name__ ( self : Union[str, Any], __A : Dict=0 ):
UpperCAmelCase : Tuple = np.random.RandomState(__A )
UpperCAmelCase : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__ ( self : str ):
UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : List[str] = self.get_dummy_inputs()
UpperCAmelCase : Union[str, Any] = pipe(**__A ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
UpperCAmelCase : List[str] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
UpperCAmelCase : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : List[Any] = self.get_dummy_inputs()
UpperCAmelCase : str = pipe(**__A ).images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
UpperCAmelCase : Optional[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
UpperCAmelCase : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : str = self.get_dummy_inputs()
UpperCAmelCase : str = pipe(**__A ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
UpperCAmelCase : Optional[int] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
UpperCAmelCase : Tuple = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Dict = self.get_dummy_inputs()
UpperCAmelCase : List[str] = pipe(**__A ).images
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
UpperCAmelCase : int = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
UpperCAmelCase : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
UpperCAmelCase : Dict = pipe(**__A ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
UpperCAmelCase : Tuple = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
UpperCAmelCase : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Tuple = self.get_dummy_inputs()
UpperCAmelCase : int = pipe(**__A ).images
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
UpperCAmelCase : Optional[Any] = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
UpperCAmelCase : int = 3 * [inputs['''prompt''']]
# forward
UpperCAmelCase : Any = pipe(**__A )
UpperCAmelCase : int = output.images[0, -3:, -3:, -1]
UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
UpperCAmelCase : Dict = 3 * [inputs.pop('''prompt''' )]
UpperCAmelCase : List[str] = pipe.tokenizer(
__A, padding='''max_length''', max_length=pipe.tokenizer.model_max_length, truncation=__A, return_tensors='''np''', )
UpperCAmelCase : List[str] = text_inputs['''input_ids''']
UpperCAmelCase : Union[str, Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
UpperCAmelCase : int = prompt_embeds
# forward
UpperCAmelCase : Any = pipe(**__A )
UpperCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Tuple = self.get_dummy_inputs()
UpperCAmelCase : Union[str, Any] = 3 * ['''this is a negative prompt''']
UpperCAmelCase : Optional[int] = negative_prompt
UpperCAmelCase : int = 3 * [inputs['''prompt''']]
# forward
UpperCAmelCase : Optional[Any] = pipe(**__A )
UpperCAmelCase : List[str] = output.images[0, -3:, -3:, -1]
UpperCAmelCase : int = self.get_dummy_inputs()
UpperCAmelCase : int = 3 * [inputs.pop('''prompt''' )]
UpperCAmelCase : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase : Tuple = pipe.tokenizer(
__A, padding='''max_length''', max_length=pipe.tokenizer.model_max_length, truncation=__A, return_tensors='''np''', )
UpperCAmelCase : Optional[Any] = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
UpperCAmelCase , UpperCAmelCase : Dict = embeds
# forward
UpperCAmelCase : Union[str, Any] = pipe(**__A )
UpperCAmelCase : Optional[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
@property
def __magic_name__ ( self : Any ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Tuple = ort.SessionOptions()
UpperCAmelCase : int = False
return options
def __magic_name__ ( self : Tuple ):
# using the PNDM scheduler by default
UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''onnx''', safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Union[str, Any] = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
UpperCAmelCase : Any = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=1_0, output_type='''np''' )
UpperCAmelCase : Tuple = output.images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Union[str, Any] = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', subfolder='''scheduler''', revision='''onnx''' )
UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', scheduler=__A, safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Any = '''open neural network exchange'''
UpperCAmelCase : Optional[int] = np.random.RandomState(0 )
UpperCAmelCase : str = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=1_0, generator=__A, output_type='''np''' )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase : Optional[Any] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__ ( self : int ):
UpperCAmelCase : Tuple = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', subfolder='''scheduler''', revision='''onnx''' )
UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', scheduler=__A, safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Optional[Any] = '''open neural network exchange'''
UpperCAmelCase : Union[str, Any] = np.random.RandomState(0 )
UpperCAmelCase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=1_0, generator=__A, output_type='''np''' )
UpperCAmelCase : Union[str, Any] = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase : Optional[int] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = 0
def test_callback_fn(__A : int, __A : int, __A : np.ndarray ) -> None:
UpperCAmelCase : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
UpperCAmelCase : Optional[Any] = latents[0, -3:, -3:, -1]
UpperCAmelCase : Optional[int] = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
UpperCAmelCase : List[str] = latents[0, -3:, -3:, -1]
UpperCAmelCase : Dict = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Union[str, Any] = '''Andromeda galaxy in a bottle'''
UpperCAmelCase : List[str] = np.random.RandomState(0 )
pipe(
prompt=__A, num_inference_steps=5, guidance_scale=7.5, generator=__A, callback=__A, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', revision='''onnx''', safety_checker=__A, feature_extractor=__A, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(__A, __A )
assert pipe.safety_checker is None
UpperCAmelCase : int = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(__A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase : Optional[int] = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
| 336 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
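# Worked probe example: searching for 67 in [10, 30, 40, 45, 50, 66, 77, 93]
# starts at point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4, so the
# first comparison happens at index 4 (value 50) rather than at the midpoint.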
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
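# The recursive variant takes explicit bounds, e.g.:
#     interpolation_search_by_recursion(collection, target, 0, len(collection) - 1)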
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
| 336 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
def __init__( self : List[str], __A : List[str], __A : Any=3, __A : List[Any]=3_2, __A : str=3, __A : Tuple=1_0, __A : Optional[Any]=[1_0, 2_0, 3_0, 4_0], __A : List[Any]=[1, 1, 2, 1], __A : List[str]=True, __A : Dict=True, __A : Any="relu", __A : Tuple=3, __A : List[Any]=None, ):
UpperCAmelCase : List[str] = parent
UpperCAmelCase : Tuple = batch_size
UpperCAmelCase : List[Any] = image_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Dict = embeddings_size
UpperCAmelCase : Optional[int] = hidden_sizes
UpperCAmelCase : Union[str, Any] = depths
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : List[str] = scope
UpperCAmelCase : Optional[int] = len(__A )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : int = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Dict ):
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def __magic_name__ ( self : List[str], __A : int, __A : Union[str, Any], __A : List[str] ):
UpperCAmelCase : Union[str, Any] = TFRegNetModel(config=__A )
UpperCAmelCase : Optional[int] = model(__A, training=__A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )
def __magic_name__ ( self : Tuple, __A : int, __A : Optional[Any], __A : Optional[Any] ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = TFRegNetForImageClassification(__A )
UpperCAmelCase : Union[str, Any] = model(__A, labels=__A, training=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Any ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[Any] = TFRegNetModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : List[str] ):
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0, reason='''TF does not support backprop for grouped convolutions on CPU.''', )
@slow
def __magic_name__ ( self : Optional[Any] ):
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Dict = [*signature.parameters.keys()]
UpperCAmelCase : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : List[str] ):
def check_hidden_states_output(__A : List[Any], __A : Dict, __A : int ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__A, __A ), training=__A )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__A ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[int] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : Tuple = layer_type
UpperCAmelCase : List[str] = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Any = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__A : Optional[Any], __A : int, __A : List[str], __A : Tuple={} ):
UpperCAmelCase : Optional[Any] = model(__A, return_dict=__A, **__A )
UpperCAmelCase : Any = model(__A, return_dict=__A, **__A ).to_tuple()
def recursive_check(__A : List[str], __A : Optional[int] ):
if isinstance(__A, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__A, __A ):
recursive_check(__A, __A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__A, __A ) ), msg=(
'''Tuple and dict output are not equal. Difference:'''
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
), )
recursive_check(__A, __A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : List[Any] = self._prepare_for_class(__A, __A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
check_equivalence(__A, __A, __A )
UpperCAmelCase : str = self._prepare_for_class(__A, __A, return_labels=__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A, return_labels=__A )
check_equivalence(__A, __A, __A )
UpperCAmelCase : Union[str, Any] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = self._prepare_for_class(__A, __A )
check_equivalence(__A, __A, __A, {'''output_hidden_states''': True} )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A, return_labels=__A )
UpperCAmelCase : List[Any] = self._prepare_for_class(__A, __A, return_labels=__A )
check_equivalence(__A, __A, __A, {'''output_hidden_states''': True} )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def __magic_name__ ( self : Optional[int] ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : int = TFRegNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> List[Any]:
UpperCAmelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Optional[Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : Union[str, Any] = self.default_image_processor
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : List[Any] = image_processor(images=__A, return_tensors='''tf''' )
# forward pass
UpperCAmelCase : str = model(**__A, training=__A )
# verify the logits
UpperCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Any = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3], __A, atol=1E-4 )
| 336 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
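# For layer 0 of a non-semantic checkpoint this produces pairs such as
# ("blocks.0.attn.proj.weight", "beit.encoder.layer.0.attention.output.dense.weight").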
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values (the stacked qkv weight is split into thirds)
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
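# Example invocation (output path and script name are illustrative; the checkpoint
# URL is the parser default above):
#
#     python convert_dit_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base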
| 336 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Remap deprecated `no_*` arguments to their positive counterparts with a warning.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
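# Illustrative construction (field values are placeholders; `models`, `inference`,
# `sequence_lengths` and `batch_sizes` come from the parent BenchmarkArguments):
#
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], inference=True, sequence_lengths=[32], batch_sizes=[1]
#     )
#
# args.strategy is then a OneDeviceStrategy (CPU/GPU) or a TPUStrategy when a TPU is resolved.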
| 336 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 1 |
def is_even(number: int) -> bool:
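    """Return True for even integers, False for odd ones.

    Doctests added (illustrative) so the doctest.testmod() call below has cases to run:
    >>> is_even(0)
    True
    >>> is_even(7)
    False
    >>> is_even(-2)
    True
    """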
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM treats whitespace-separated chunks as tokens.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        # Resulting layout: <cls> seq_a <eos> seq_b <eos> -- multiple inputs always have an EOS token.
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
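# Usage sketch (hypothetical local vocab file with one token per line):
#
#     tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#     tokenizer.build_inputs_with_special_tokens([5, 6, 7])  # -> [cls_id, 5, 6, 7, eos_id]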
| 336 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
@unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
| 336 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : List[Any] = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ["ConvNextFeatureExtractor"]
_lowerCamelCase : int = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 336 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
        # We need to order the inputs in the way they appear in forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
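                # each per-layer past key/value pair is a zero tensor of shape
                # (batch, n_head, past_seq_len, n_embd // n_head)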
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
| 336 | 1 |
def a__ ( number : int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # Clearing the lowest set bit (number &= number - 1) jumps straight to
        # the next set bit, so the loop runs once per 1-bit rather than once
        # per bit position (up to 32 iterations)
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
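    # Illustrative spot checks (values chosen arbitrarily):
    for value, expected in ((0, 0), (1, 1), (0b1011, 3), (1 << 31, 1)):
        assert a__(value ) == expected, (value, expected)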
| 336 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 1 |
def interpolation_search ( sorted_collection : list , item : int ):
    left = 0
    right = len(sorted_collection ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
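        # e.g. searching 67 in [10, 30, 40, 45, 50, 66, 77, 93]:
        # point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4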
# out of range check
        if point < 0 or point >= len(sorted_collection ):
return None
        current_item = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
                right = left
                left = point
elif point > right:
                left = right
                right = point
else:
if item < current_item:
                    right = point - 1
else:
                    left = point + 1
return None
def interpolation_search_by_recursion ( sorted_collection , item , left , right ):
    # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
    if point < 0 or point >= len(sorted_collection ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )  # narrowed window assumed: [point, left]
elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , point )  # narrowed window assumed: [right, point]
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
else:
return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted ( collection ):
    if collection != sorted(collection ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    debug = 0
    if debug == 1:
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 6_7
    result = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
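        # each schedule entry below is
        # sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)),
        # i.e. a geometric interpolation between sigma_max**2 and sigma_min**2
        # over the reversed timesteps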
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
| 336 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:  # class and method names below are assumed where no call site pins them down
    def __init__( self, size : int ):
        self._graph : list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self, vertex : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
    def size( self ):
return self._size
    def add_edge( self, from_vertex : int, to_vertex : int, weight : int ):
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex, weight ) )
    def get_shortest_path( self, start_vertex : int, finish_vertex : int ):
        queue = deque([start_vertex] )
        distances : list[int | None] = [None] * self.size
        distances[start_vertex] = 0
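        # 0-1 BFS invariant: the deque holds vertices in nondecreasing distance
        # order; zero-weight edges go to the front (appendleft) and unit-weight
        # edges to the back, so no priority queue is needed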
while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
if (
                    isinstance(dest_vertex_distance, int )
and new_distance >= dest_vertex_distance
):
continue
                distances[edge.destination_vertex] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo ( ctypes.Structure ):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def hide_cursor( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def show_cursor( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def hide_cursor_context( ):  # name assumed; wraps hide/show for use as a context manager
try:
hide_cursor()
yield
finally:
show_cursor()
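# Typical use (illustrative sketch): keep the cursor hidden for the duration of
# a long-running render loop, restoring it even if an exception escapes:
#
#     with hide_cursor_context():
#         render_progress()  # hypothetical caller-side function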
| 336 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, **__A : int ):
super().__init__(**__A )
requires_backends(self, '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Any, __A : Union[str, List[str], "Image", List["Image"]], **__A : int ):
return super().__call__(__A, **__A )
def __magic_name__ ( self : int, **__A : List[str] ):
UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __magic_name__ ( self : List[str], __A : Optional[int], __A : Tuple=None, __A : Dict="This is a photo of {}." ):
UpperCAmelCase : Any = load_image(__A )
UpperCAmelCase : Optional[Any] = self.image_processor(images=[image], return_tensors=self.framework )
UpperCAmelCase : Optional[Any] = candidate_labels
        UpperCAmelCase : Tuple = [hypothesis_template.format(x ) for x in candidate_labels]
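        # e.g. candidate_labels ["cat", "dog"] with the default template become
        # ["This is a photo of cat.", "This is a photo of dog."]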
UpperCAmelCase : Tuple = self.tokenizer(__A, return_tensors=self.framework, padding=__A )
UpperCAmelCase : List[Any] = [text_inputs]
return inputs
def __magic_name__ ( self : Optional[int], __A : str ):
UpperCAmelCase : List[Any] = model_inputs.pop('''candidate_labels''' )
UpperCAmelCase : str = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0], __A ):
UpperCAmelCase : Any = text_inputs[0]
else:
# Batching case.
UpperCAmelCase : Optional[Any] = text_inputs[0][0]
UpperCAmelCase : str = self.model(**__A, **__A )
UpperCAmelCase : Optional[int] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def __magic_name__ ( self : List[Any], __A : int ):
UpperCAmelCase : Dict = model_outputs.pop('''candidate_labels''' )
UpperCAmelCase : List[str] = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCAmelCase : Dict = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase : Optional[int] = probs.tolist()
if not isinstance(__A, __A ):
UpperCAmelCase : List[Any] = [scores]
elif self.framework == "tf":
UpperCAmelCase : int = stable_softmax(__A, axis=-1 )
UpperCAmelCase : Tuple = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
UpperCAmelCase : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels ), key=lambda x : -x[0] )
]
return result
| 336 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 1 |
from __future__ import annotations
from typing import Any
def a__ ( postfix_notation : list ) -> int:
if not postfix_notation:
return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack : list[Any] = []
for token in postfix_notation:
if token in operations:
            b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
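            # Python's // floors toward negative infinity; when the operand
            # signs differ and there is a remainder, adding 1 emulates the
            # truncation toward zero used by most other languages
            # (e.g. -7 // 2 == -4, but truncation gives -3)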
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
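    # Worked example (illustrative): the postfix form of (2 + 1) * 3 evaluates to 9
    assert a__(['''2''', '''1''', '''+''', '''3''', '''*'''] ) == 9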
| 336 |
from __future__ import annotations
def a__ ( number_of_bytes : int , partitions : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
for i in range(UpperCAmelCase ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
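    # Worked example (illustrative): 10 bytes over 3 partitions; the final
    # slice absorbs the remainder
    assert a__(1_0, 3 ) == ['''1-3''', '''4-6''', '''7-10''']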
| 336 | 1 |
import os
# Precomputes a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
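# A worked check: "SKY" scores 19 + 11 + 25 = 55 = t_10, so it is triangular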
def solution( ) -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , '''words.txt''' )
    words = ''''''
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
| 336 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
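    # e.g. ["cd /usr/share", "pip install accelerate -U"] collapses below into
    # the single string "cd /usr/share; pip install accelerate -U" that is
    # handed to gcloud's --command flag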
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __UpperCAmelCase :
@property
def __magic_name__ ( self : List[Any] ):
return self.get_dummy_input()
@property
def __magic_name__ ( self : str ):
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __magic_name__ ( self : Any, __A : Tuple=True, __A : Optional[int]=False, __A : Tuple=False, __A : Union[str, Any]=False, ):
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : int = 3_2
UpperCAmelCase : int = (3_2, 3_2)
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = torch.device(__A )
UpperCAmelCase : Union[str, Any] = (batch_size, num_channels) + sizes
UpperCAmelCase : Tuple = randn_tensor(__A, generator=__A, device=__A )
UpperCAmelCase : Optional[Any] = {'''hidden_states''': hidden_states}
if include_temb:
UpperCAmelCase : Optional[int] = 1_2_8
UpperCAmelCase : Union[str, Any] = randn_tensor((batch_size, temb_channels), generator=__A, device=__A )
if include_res_hidden_states_tuple:
UpperCAmelCase : Dict = torch.manual_seed(1 )
UpperCAmelCase : List[Any] = (randn_tensor(__A, generator=__A, device=__A ),)
if include_encoder_hidden_states:
UpperCAmelCase : Tuple = floats_tensor((batch_size, 3_2, 3_2) ).to(__A )
if include_skip_sample:
UpperCAmelCase : Optional[int] = randn_tensor(((batch_size, 3) + sizes), generator=__A, device=__A )
return dummy_input
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[Any] = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
'''temb_channels''': 1_2_8,
}
if self.block_type == "up":
UpperCAmelCase : Optional[Any] = 3_2
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
UpperCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __magic_name__ ( self : str, __A : Dict ):
UpperCAmelCase , UpperCAmelCase : Tuple = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Optional[int] = self.block_class(**__A )
unet_block.to(__A )
unet_block.eval()
with torch.no_grad():
UpperCAmelCase : Tuple = unet_block(**__A )
if isinstance(__A, __A ):
UpperCAmelCase : int = output[0]
self.assertEqual(output.shape, self.output_shape )
UpperCAmelCase : Optional[int] = output[0, -1, -3:, -3:]
UpperCAmelCase : Union[str, Any] = torch.tensor(__A ).to(__A )
assert torch_all_close(output_slice.flatten(), __A, atol=5E-3 )
@unittest.skipIf(torch_device == '''mps''', '''Training is not supported in mps''' )
def __magic_name__ ( self : Any ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Any = self.block_class(**__A )
model.to(__A )
model.train()
UpperCAmelCase : Tuple = model(**__A )
if isinstance(__A, __A ):
UpperCAmelCase : Dict = output[0]
UpperCAmelCase : List[str] = torch.device(__A )
UpperCAmelCase : str = randn_tensor(output.shape, device=__A )
UpperCAmelCase : Dict = torch.nn.functional.mse_loss(__A, __A )
loss.backward()
| 336 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
    # remove and rename some keys of the original model state dict, then load it
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
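# Hedged invocation sketch (the script name and file paths are placeholders):
#   python convert_mobilevitv2_to_hf.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf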
| 336 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( val : Optional[Any] ) -> Any:
    # normalize the input, then map common truthy/falsy strings to 1/0
    UpperCAmelCase : Any = val.lower()
    if UpperCAmelCase in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if UpperCAmelCase in {"n", "no", "f", "false", "off", "0"}:
        return 0
raise ValueError(f'''invalid truth value {val!r}''' )
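# Example behaviour of the truth-value parser above (doctest-style, assuming it
# is bound to a readable name such as `strtobool`):
#   strtobool("YES") -> 1, strtobool("off") -> 0, strtobool("maybe") -> ValueError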
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
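# Minimal usage sketch for the ModelOutput-style base class above. The subclass
# and field names are illustrative; concrete outputs such as BaseModelOutput
# follow the same shape:
#
#   @dataclass
#   class SampleOutput(ModelOutput):  # ModelOutput = the base class defined above
#       logits: Any = None
#       hidden_states: Any = None
#
#   out = SampleOutput(logits=np.ones((1, 2)))
#   out["logits"] is out.logits      # dict-style and attribute access agree
#   out.to_tuple() == (out.logits,)  # fields left as None are dropped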
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ):
for k, v in d.items():
UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k
if v and isinstance(UpperCAmelCase , UpperCAmelCase ):
                yield from _flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase )
else:
yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
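# Example derived from the implementation above:
#   flatten_dict({"a": {"b": 1}, "c": 2}) -> {"a.b": 1, "c": 2}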
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
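# Usage note for the context manager above: with use_temp_dir=True it yields a
# fresh temporary directory that is removed on exit; otherwise it yields the
# given working_dir unchanged.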
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
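# Example of the expected mapping (class names are assumptions about typical
# model classes): a torch-backed PreTrainedModel subclass -> "pt", a Keras-based
# TFPreTrainedModel subclass -> "tf", a FlaxPreTrainedModel subclass -> "flax";
# anything else raises TypeError.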
| 336 | 1 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
_lowerCamelCase : Optional[Any] = 2_5_6_0_4_7
_lowerCamelCase : List[Any] = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = NllbTokenizer
UpperCamelCase = NllbTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = {}
def __magic_name__ ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[Any] = NllbTokenizer(__A, keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = NllbTokenizer(__A, keep_accents=__A )
UpperCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__A, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )
UpperCAmelCase : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
], )
UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
], )
def __magic_name__ ( self : str ):
UpperCAmelCase : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__A, **__A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(__A, **__A )
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(__A )
UpperCAmelCase : Dict = tokenizer_p.save_pretrained(__A )
                # Check that it saves the same files, plus the tokenizer.json file for the fast tokenizer
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCAmelCase : List[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__A, __A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(__A )
UpperCAmelCase : int = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A, __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : int = tempfile.mkdtemp()
UpperCAmelCase : List[Any] = tokenizer_r.save_pretrained(__A, legacy_format=__A )
UpperCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(__A )
                # Check that it saves the same files
self.assertSequenceEqual(__A, __A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(__A )
UpperCAmelCase : List[Any] = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A, __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Tuple = tokenizer_r.save_pretrained(__A, legacy_format=__A )
UpperCAmelCase : str = tokenizer_p.save_pretrained(__A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Any = tokenizer_r.from_pretrained(__A )
UpperCAmelCase : Dict = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A, __A ) )
shutil.rmtree(__A )
@require_torch
def __magic_name__ ( self : Optional[int] ):
if not self.test_seqaseq:
return
UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
UpperCAmelCase : Dict = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
UpperCAmelCase : List[str] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
UpperCAmelCase : int = tokenizer.prepare_seqaseq_batch(
src_texts=__A, tgt_texts=__A, max_length=3, max_target_length=1_0, return_tensors='''pt''', src_lang='''eng_Latn''', tgt_lang='''ron_Latn''', )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 1_0 )
# max_target_length will default to max_length if not specified
UpperCAmelCase : Tuple = tokenizer.prepare_seqaseq_batch(
__A, tgt_texts=__A, max_length=3, return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 3 )
UpperCAmelCase : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=__A, max_length=3, max_target_length=1_0, return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3 )
self.assertNotIn('''decoder_input_ids''', __A )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def __magic_name__ ( self : Dict ):
pass
def __magic_name__ ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Any = [AddedToken('''<special>''', lstrip=__A )]
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A, additional_special_tokens=__A, **__A )
UpperCAmelCase : Dict = tokenizer_r.encode('''Hey this is a <special> token''' )
UpperCAmelCase : Any = tokenizer_r.encode('''<special>''', add_special_tokens=__A )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__A, additional_special_tokens=__A, **__A, )
UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(
__A, additional_special_tokens=__A, **__A )
UpperCAmelCase : Union[str, Any] = tokenizer_p.encode('''Hey this is a <special> token''' )
UpperCAmelCase : Union[str, Any] = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(__A, __A )
self.assertEqual(__A, __A )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = """facebook/nllb-200-distilled-600M"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __magic_name__ ( cls : Optional[int] ):
UpperCAmelCase : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='''eng_Latn''', tgt_lang='''ron_Latn''' )
UpperCAmelCase : Tuple = 1
return cls
def __magic_name__ ( self : List[Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''], 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''], 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''], 2_5_6_0_5_7 )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, __A )
def __magic_name__ ( self : List[str] ):
self.assertIn(__A, self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase : Any = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
UpperCAmelCase : Union[str, Any] = self.tokenizer.decode(__A, skip_special_tokens=__A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=__A )
self.assertEqual(__A, __A )
self.assertNotIn(self.tokenizer.eos_token, __A )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Optional[int] = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0], __A )
UpperCAmelCase : Optional[Any] = 1_0
UpperCAmelCase : int = self.tokenizer(__A, max_length=__A, truncation=__A ).input_ids[0]
self.assertEqual(ids[-1], 2 )
self.assertEqual(ids[0], __A )
self.assertEqual(len(__A ), __A )
def __magic_name__ ( self : List[str] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [2_5_6_2_0_3, 3] )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Dict = tempfile.mkdtemp()
UpperCAmelCase : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__A )
UpperCAmelCase : Any = NllbTokenizer.from_pretrained(__A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, __A )
@require_torch
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=__A, truncation=__A, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
UpperCAmelCase : Union[str, Any] = shift_tokens_right(
batch['''labels'''], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(__A, __A )
self.assertEqual((2, 1_5), batch.input_ids.shape )
self.assertEqual((2, 1_5), batch.attention_mask.shape )
UpperCAmelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, __A )
self.assertEqual(__A, batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text, padding=__A, truncation=__A, max_length=3, return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = self.tokenizer(
text_target=self.tgt_text, padding=__A, truncation=__A, max_length=1_0, return_tensors='''pt''' )
UpperCAmelCase : Tuple = targets['''input_ids''']
UpperCAmelCase : List[Any] = shift_tokens_right(
__A, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 1_0 )
@require_torch
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Dict = self.tokenizer._build_translation_inputs(
'''A test''', return_tensors='''pt''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(__A ), {
                # eng_Latn, A, test, EOS
'''input_ids''': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
                # fra_Latn (the requested tgt_lang)
'''forced_bos_token_id''': 2_5_6_0_5_7,
}, )
@require_torch
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Tuple = True
UpperCAmelCase : int = self.tokenizer(
'''UN Chief says there is no military solution in Syria''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids, [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
UpperCAmelCase : int = False
UpperCAmelCase : Optional[int] = self.tokenizer(
'''UN Chief says there is no military solution in Syria''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids, [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
| 336 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
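# These tests are normally collected by pytest, e.g. (the path is an assumption
# about the repository layout):
#   python -m pytest tests/models/layoutlm/test_tokenization_layoutlm.py -q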
| 336 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
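# Note on the lazy pattern above: importing the package itself stays cheap; the
# torch-backed symbols (e.g. EncodecModel) are only resolved by _LazyModule on
# first attribute access, assuming torch is installed.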
| 336 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
        # make random mask reproducible across the PT and TF models
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 | 1 |
import csv
import tweepy
# Twitter API credentials
_lowerCamelCase : Union[str, Any] = ""
_lowerCamelCase : Tuple = ""
_lowerCamelCase : Dict = ""
_lowerCamelCase : Tuple = ""
def a__ ( UpperCAmelCase : str ) -> None:
# authorize twitter, initialize tweepy
UpperCAmelCase : List[str] = tweepy.OAuthHandler(UpperCAmelCase , UpperCAmelCase )
auth.set_access_token(UpperCAmelCase , UpperCAmelCase )
UpperCAmelCase : List[str] = tweepy.API(UpperCAmelCase )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase : int = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase : int = api.user_timeline(screen_name=UpperCAmelCase , count=200 )
# save most recent tweets
alltweets.extend(UpperCAmelCase )
# save the id of the oldest tweet less one
UpperCAmelCase : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(UpperCAmelCase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase : Any = api.user_timeline(
screen_name=UpperCAmelCase , count=200 , max_id=UpperCAmelCase )
# save most recent tweets
alltweets.extend(UpperCAmelCase )
# update the id of the oldest tweet less one
UpperCAmelCase : Tuple = alltweets[-1].id - 1
print(f'''...{len(UpperCAmelCase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase : Optional[int] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCAmelCase : int = csv.writer(UpperCAmelCase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(UpperCAmelCase )
if __name__ == "__main__":
    # pass in the username of the account whose tweets you want to download
get_all_tweets("FirePing32")
| 336 |
def a__ ( UpperCAmelCase : int ) -> int:
UpperCAmelCase : list[list[int]] = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , UpperCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
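# A sanity check against the de-obfuscated original (the scrambled assignment
# target in the first loop above presumably read `memo[i][0] = 1`): the classic
# integer-partition DP gives partition(5) == 7, matching the seven partitions
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.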
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : Optional[int] = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """mask2former"""
UpperCamelCase = ["""swin"""]
UpperCamelCase = {"""hidden_size""": """hidden_dim"""}
def __init__( self : Optional[Any], __A : Optional[Dict] = None, __A : int = 2_5_6, __A : int = 2_5_6, __A : int = 2_5_6, __A : int = 1_0_2_4, __A : str = "relu", __A : int = 6, __A : int = 1_0, __A : int = 8, __A : float = 0.0, __A : int = 2_0_4_8, __A : bool = False, __A : bool = False, __A : int = 4, __A : int = 2_5_5, __A : int = 1_0_0, __A : float = 0.1, __A : float = 2.0, __A : float = 5.0, __A : float = 5.0, __A : int = 1_2_5_4_4, __A : float = 3.0, __A : float = 0.7_5, __A : float = 0.0_2, __A : float = 1.0, __A : bool = True, __A : List[int] = [4, 8, 1_6, 3_2], __A : bool = None, **__A : Union[str, Any], ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING['''swin'''](
image_size=2_2_4, in_channels=3, patch_size=4, embed_dim=9_6, depths=[2, 2, 1_8, 2], num_heads=[3, 6, 1_2, 2_4], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=__A, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
if isinstance(__A, __A ):
UpperCAmelCase : Any = backbone_config.pop('''model_type''' )
UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : str = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {",".join(self.backbones_supported )}''' )
UpperCAmelCase : Optional[int] = backbone_config
UpperCAmelCase : Optional[Any] = feature_size
UpperCAmelCase : Any = mask_feature_size
UpperCAmelCase : Optional[Any] = hidden_dim
UpperCAmelCase : List[Any] = encoder_feedforward_dim
UpperCAmelCase : List[str] = activation_function
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : int = decoder_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : Optional[int] = dropout
UpperCAmelCase : int = dim_feedforward
UpperCAmelCase : Tuple = pre_norm
UpperCAmelCase : Optional[int] = enforce_input_projection
UpperCAmelCase : List[Any] = common_stride
UpperCAmelCase : Dict = ignore_value
UpperCAmelCase : str = num_queries
UpperCAmelCase : List[Any] = no_object_weight
UpperCAmelCase : Tuple = class_weight
UpperCAmelCase : Optional[int] = mask_weight
UpperCAmelCase : Dict = dice_weight
UpperCAmelCase : List[str] = train_num_points
UpperCAmelCase : List[str] = oversample_ratio
UpperCAmelCase : Any = importance_sample_ratio
UpperCAmelCase : Dict = init_std
UpperCAmelCase : str = init_xavier_std
UpperCAmelCase : str = use_auxiliary_loss
UpperCAmelCase : Union[str, Any] = feature_strides
UpperCAmelCase : int = output_auxiliary_logits
UpperCAmelCase : Dict = decoder_layers
super().__init__(**__A )
@classmethod
def __magic_name__ ( cls : str, __A : PretrainedConfig, **__A : Any ):
return cls(
backbone_config=__A, **__A, )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[str] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 336 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Union[str, Any] = len(UpperCAmelCase )
    # We need to create a solution object to save the path.
UpperCAmelCase : int = [[0 for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )]
UpperCAmelCase : Union[str, Any] = run_maze(UpperCAmelCase , 0 , 0 , UpperCAmelCase )
if solved:
print('''\n'''.join(str(UpperCAmelCase ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def a__ ( UpperCAmelCase : list[list[int]] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[list[int]] ) -> bool:
UpperCAmelCase : Dict = len(UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
UpperCAmelCase : Dict = 1
return True
UpperCAmelCase : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
UpperCAmelCase : List[Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
        # check for already-visited and blocked points.
UpperCAmelCase : Any = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCAmelCase : str = 1
# check for directions
if (
run_maze(UpperCAmelCase , i + 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j + 1 , UpperCAmelCase )
or run_maze(UpperCAmelCase , i - 1 , UpperCAmelCase , UpperCAmelCase )
or run_maze(UpperCAmelCase , UpperCAmelCase , j - 1 , UpperCAmelCase )
):
return True
UpperCAmelCase : Any = 0
return False
return False
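# A small worked example for the de-obfuscated original (where the first `a__`
# above is the entry point, e.g. `solve_maze`; 0 = free cell, 1 = wall):
#
#     solve_maze([[0, 1, 0], [0, 0, 0], [1, 1, 0]])
#     # prints the visited path:
#     # [1, 0, 0]
#     # [1, 1, 1]
#     # [0, 0, 1]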
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCamelCase : Any = logging.getLogger(__name__)
_lowerCamelCase : Union[str, Any] = "pytorch_model.bin"
@dataclasses.dataclass
class __UpperCAmelCase :
UpperCamelCase = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class __UpperCAmelCase :
UpperCamelCase = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """The name of the task to train on."""} , )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class __UpperCAmelCase :
UpperCamelCase = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
UpperCamelCase = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
UpperCamelCase = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
UpperCamelCase = dataclasses.field(
default=1_0 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
UpperCamelCase = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
UpperCamelCase = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
UpperCamelCase = dataclasses.field(
        default=1_0_0 , metadata={"""help""": """Maximum number of self-training iterations to run."""} , )
UpperCamelCase = dataclasses.field(
default=lowerCamelCase__ , metadata={"""help""": """Random seed for initialization."""} , )
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] ) -> Dict:
UpperCAmelCase : List[str] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCAmelCase : int = dataset.filter(lambda UpperCAmelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCAmelCase : Tuple = int(eval_result * len(UpperCAmelCase ) )
print(UpperCAmelCase )
UpperCAmelCase : Optional[int] = dataset.sort('''probability''' , reverse=UpperCAmelCase )
UpperCAmelCase : str = dataset.select(range(UpperCAmelCase ) )
UpperCAmelCase : str = dataset.remove_columns(['''label''', '''probability'''] )
UpperCAmelCase : Any = dataset.rename_column('''prediction''' , '''label''' )
UpperCAmelCase : List[str] = dataset.map(lambda UpperCAmelCase : {"label": idalabel[example["label"]]} )
UpperCAmelCase : List[Any] = dataset.shuffle(seed=args.seed )
UpperCAmelCase : Union[str, Any] = os.path.join(UpperCAmelCase , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCAmelCase , index=UpperCAmelCase )
else:
dataset.to_json(UpperCAmelCase )
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , **UpperCAmelCase : List[str] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase : Tuple = STModelArguments(model_name_or_path=UpperCAmelCase )
UpperCAmelCase : int = STDataArguments(train_file=UpperCAmelCase , infer_file=UpperCAmelCase )
UpperCAmelCase : Union[str, Any] = STTrainingArguments(output_dir=UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCAmelCase ).items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
for key, value in kwargs.items():
if hasattr(UpperCAmelCase , UpperCAmelCase ):
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Sanity checks
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : int = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCAmelCase : List[str] = args.train_file
UpperCAmelCase : int = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCAmelCase : str = args.eval_file
for key in data_files:
UpperCAmelCase : Tuple = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
UpperCAmelCase : Tuple = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
UpperCAmelCase : Dict = f'''{args.output_dir}/self-train_iter-{{}}'''.format
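    # `data_dir_format(i)` expands to f"{args.output_dir}/self-train_iter-{i}"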
UpperCAmelCase : Tuple = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
accelerator.wait_for_everyone()
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : str = 0
UpperCAmelCase : int = False
# Show the progress bar
UpperCAmelCase : Tuple = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCAmelCase : int = data_dir_format(UpperCAmelCase )
assert os.path.exists(UpperCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCAmelCase : Any = os.path.join(UpperCAmelCase , '''stage-1''' )
UpperCAmelCase : List[Any] = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCAmelCase , UpperCAmelCase ):
arguments_dict.update({key: value} )
UpperCAmelCase : List[str] = os.path.join(UpperCAmelCase , '''best-checkpoint''' , UpperCAmelCase )
if os.path.exists(UpperCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , UpperCAmelCase , UpperCAmelCase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , UpperCAmelCase )
finetune(**UpperCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , UpperCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCAmelCase : List[str] = os.path.join(UpperCAmelCase , '''best-checkpoint''' )
UpperCAmelCase : str = os.path.join(UpperCAmelCase , '''stage-2''' )
# Update arguments_dict
UpperCAmelCase : Dict = model_path
UpperCAmelCase : Optional[Any] = data_files['''train''']
UpperCAmelCase : int = current_output_dir
UpperCAmelCase : Tuple = os.path.join(UpperCAmelCase , '''best-checkpoint''' , UpperCAmelCase )
if os.path.exists(UpperCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , UpperCAmelCase , UpperCAmelCase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , UpperCAmelCase )
finetune(**UpperCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , UpperCAmelCase )
UpperCAmelCase : Any = iteration
UpperCAmelCase : Optional[Any] = data_dir_format(iteration + 1 )
UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(os.path.join(UpperCAmelCase , '''best-checkpoint''' ) )
UpperCAmelCase : List[str] = config.idalabel
UpperCAmelCase : str = os.path.join(UpperCAmelCase , '''eval_results_best-checkpoint.json''' )
UpperCAmelCase : Union[str, Any] = os.path.join(UpperCAmelCase , '''test_results_best-checkpoint.json''' )
assert os.path.exists(UpperCAmelCase )
with open(UpperCAmelCase , '''r''' ) as f:
UpperCAmelCase : Optional[Any] = float(json.load(UpperCAmelCase )[args.eval_metric] )
UpperCAmelCase : Optional[int] = os.path.join(UpperCAmelCase , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(UpperCAmelCase )
# Loading the dataset from local csv or json files.
UpperCAmelCase : Tuple = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
UpperCAmelCase : List[Any] = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
shutil.copy(UpperCAmelCase , os.path.join(UpperCAmelCase , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(UpperCAmelCase ):
shutil.copy(UpperCAmelCase , os.path.join(UpperCAmelCase , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
accelerator.wait_for_everyone()
UpperCAmelCase : Dict = os.path.join(UpperCAmelCase , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCAmelCase : Optional[int] = eval_result
if best_iteration is None:
UpperCAmelCase : Dict = new_iteration
UpperCAmelCase : Tuple = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCAmelCase : List[str] = new_iteration
UpperCAmelCase : str = new_eval_result
UpperCAmelCase : Tuple = 0
else:
if new_eval_result == best_eval_result:
UpperCAmelCase : str = new_iteration
UpperCAmelCase : int = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCAmelCase : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , UpperCAmelCase )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCAmelCase , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(UpperCAmelCase , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCAmelCase , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(UpperCAmelCase , '''eval_results_best-iteration.json''' ) , )
| 336 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
| 336 | 1 |
from __future__ import annotations
from math import pow, sqrt
def a__ ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(UpperCAmelCase , 2 ) - pow(UpperCAmelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(UpperCAmelCase , 2 ) - pow(UpperCAmelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(UpperCAmelCase , 2 ) + pow(UpperCAmelCase , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
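# A quick check against the de-obfuscated original (whose parameters are
# `resistance`, `reactance` and `impedance`, exactly one of which must be 0):
# passing (3, 4, 0) yields {"impedance": 5.0}, the classic 3-4-5 right triangle.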
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
UpperCAmelCase : str = [randint(-1_000 , 1_000 ) for i in range(10 )]
UpperCAmelCase : Any = randint(-5_000 , 5_000 )
return (arr, r)
_lowerCamelCase : Any = make_dataset()
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, ...]:
for triplet in permutations(UpperCAmelCase , 3 ):
if sum(UpperCAmelCase ) == target:
return tuple(sorted(UpperCAmelCase ) )
return (0, 0, 0)
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[int, int, int]:
arr.sort()
UpperCAmelCase : Tuple = len(UpperCAmelCase )
for i in range(n - 1 ):
UpperCAmelCase , UpperCAmelCase : int = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
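# A worked example for both routines (named `triplet_sum1` / `triplet_sum2` in
# the de-obfuscated original, as the timing strings below indicate): for
# arr = [1, 2, 3, 4] and target = 9, each returns (2, 3, 4), since 2 + 3 + 4 == 9.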
def a__ ( ) -> tuple[float, float]:
UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
UpperCAmelCase : Tuple = '''
triplet_sum1(*dataset)
'''
UpperCAmelCase : List[str] = '''
triplet_sum2(*dataset)
'''
UpperCAmelCase : Tuple = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
UpperCAmelCase : str = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10_000 )
return (min(UpperCAmelCase ), min(UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCamelCase : int = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """donut-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Any, __A : Dict=2_2_4, __A : Tuple=4, __A : Optional[Any]=3, __A : List[str]=9_6, __A : Tuple=[2, 2, 6, 2], __A : Dict=[3, 6, 1_2, 2_4], __A : Tuple=7, __A : Dict=4.0, __A : int=True, __A : Any=0.0, __A : List[Any]=0.0, __A : Tuple=0.1, __A : List[Any]="gelu", __A : Union[str, Any]=False, __A : Union[str, Any]=0.0_2, __A : Tuple=1E-5, **__A : Optional[int], ):
super().__init__(**__A )
UpperCAmelCase : str = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : Any = num_channels
UpperCAmelCase : Union[str, Any] = embed_dim
UpperCAmelCase : List[Any] = depths
UpperCAmelCase : List[Any] = len(__A )
UpperCAmelCase : Dict = num_heads
UpperCAmelCase : List[str] = window_size
UpperCAmelCase : List[Any] = mlp_ratio
UpperCAmelCase : Union[str, Any] = qkv_bias
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = drop_path_rate
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : List[Any] = use_absolute_embeddings
UpperCAmelCase : Tuple = layer_norm_eps
UpperCAmelCase : Tuple = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(__A ) - 1) )
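        # with the defaults above (embed_dim=96, 4 stages) this is 96 * 2**3 = 768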
| 336 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCAmelCase :
def __magic_name__ ( self : int, __A : Dict ):
raise NotImplementedError()
def __magic_name__ ( self : int ):
raise NotImplementedError()
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : "AutoTokenizer", __A : bool = False, **__A : str ):
UpperCAmelCase : List[str] = tokenizer
UpperCAmelCase : str = skip_prompt
UpperCAmelCase : List[str] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = True
def __magic_name__ ( self : Dict, __A : Optional[int] ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase : Optional[int] = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase : Any = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase : Union[str, Any] = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase : Optional[Any] = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def __magic_name__ ( self : str ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase : int = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
UpperCAmelCase : Dict = text[self.print_len :]
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[Any] = 0
else:
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : str = True
self.on_finalized_text(__A, stream_end=__A )
def __magic_name__ ( self : List[str], __A : str, __A : bool = False ):
print(__A, flush=__A, end='''''' if not stream_end else None )
def __magic_name__ ( self : List[Any], __A : Optional[int] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : "AutoTokenizer", __A : bool = False, __A : Optional[float] = None, **__A : str ):
super().__init__(__A, __A, **__A )
UpperCAmelCase : Dict = Queue()
UpperCAmelCase : Any = None
UpperCAmelCase : Any = timeout
def __magic_name__ ( self : Dict, __A : str, __A : bool = False ):
self.text_queue.put(__A, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self : int ):
return self
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
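# A typical consumption sketch for the queue-backed streamer class defined last
# above (the `model`, `tokenizer` and `inputs` objects are assumed, not defined
# here; `TextIteratorStreamer` is the class name in the de-obfuscated original):
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer)).start()
#     for new_text in streamer:
#         print(new_text, end="")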
| 336 | 1 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[int] , UpperCAmelCase : list[int] , UpperCAmelCase : int ) -> tuple[float, list[float]]:
UpperCAmelCase : str = list(range(len(UpperCAmelCase ) ) )
UpperCAmelCase : List[Any] = [v / w for v, w in zip(UpperCAmelCase , UpperCAmelCase )]
index.sort(key=lambda UpperCAmelCase : ratio[i] , reverse=UpperCAmelCase )
UpperCAmelCase : float = 0
UpperCAmelCase : list[float] = [0] * len(UpperCAmelCase )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase : Optional[int] = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase : Dict = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
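# The classic CLRS example, run against the de-obfuscated original (where the
# scrambled assignments above set `fractions[i]`): for value = [60, 100, 120],
# weight = [10, 20, 30] and capacity = 50 it returns (240.0, [1, 1, 2/3]),
# taking items 0 and 1 whole plus two thirds (about 0.667) of item 2.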
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
import numpy
# List of input, output pairs
_lowerCamelCase : Dict = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
_lowerCamelCase : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
_lowerCamelCase : Dict = [2, 4, 1, 5]
_lowerCamelCase : Dict = len(train_data)
_lowerCamelCase : int = 0.0_0_9
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]="train" ) -> Dict:
return calculate_hypothesis_value(UpperCAmelCase , UpperCAmelCase ) - output(
UpperCAmelCase , UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Any:
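    # linear hypothesis h(x) = p[0] + sum(p[i + 1] * x[i]), with p the global
    # parameter_vector and x the example's input tuple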
UpperCAmelCase : str = 0
for i in range(len(UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ) -> List[str]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : str=m ) -> Dict:
UpperCAmelCase : Optional[int] = 0
for i in range(UpperCAmelCase ):
if index == -1:
summation_value += _error(UpperCAmelCase )
else:
summation_value += _error(UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def a__ ( UpperCAmelCase : Dict ) -> Dict:
UpperCAmelCase : Dict = summation_of_cost_derivative(UpperCAmelCase , UpperCAmelCase ) / m
return cost_derivative_value
def a__ ( ) -> List[Any]:
global parameter_vector
    # Tune this value to set the tolerance for the predicted output
UpperCAmelCase : List[str] = 0.000002
UpperCAmelCase : Any = 0
UpperCAmelCase : Dict = 0
while True:
j += 1
UpperCAmelCase : List[Any] = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase ) ):
UpperCAmelCase : List[str] = get_cost_derivative(i - 1 )
UpperCAmelCase : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase , rtol=UpperCAmelCase , ):
break
UpperCAmelCase : int = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ) -> List[Any]:
for i in range(len(UpperCAmelCase ) ):
print(('''Actual output value:''', output(UpperCAmelCase , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(UpperCAmelCase , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 | 1 |
import collections
import os
import re
from pathlib import Path
_lowerCamelCase : int = "src/transformers"
# Matches is_xxx_available()
_lowerCamelCase : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_lowerCamelCase : Optional[Any] = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowerCamelCase : int = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_lowerCamelCase : int = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_lowerCamelCase : str = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowerCamelCase : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_lowerCamelCase : Optional[Any] = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowerCamelCase : Tuple = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_lowerCamelCase : Optional[int] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_lowerCamelCase : int = re.compile(R"^\s*try:")
# Catches a line with else:
_lowerCamelCase : str = re.compile(R"^\s*else:")
def a__ ( UpperCAmelCase : Tuple ) -> Dict:
if _re_test_backend.search(UpperCAmelCase ) is None:
return None
UpperCAmelCase : str = [b[0] for b in _re_backend.findall(UpperCAmelCase )]
backends.sort()
return "_and_".join(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> Union[str, Any]:
with open(UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase : List[Any] = f.readlines()
UpperCAmelCase : Optional[Any] = 0
while line_index < len(UpperCAmelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase : Any = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase : Optional[int] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCAmelCase ):
UpperCAmelCase : Dict = _re_one_line_import_struct.search(UpperCAmelCase ).groups()[0]
UpperCAmelCase : int = re.findall(r'''\[([^\]]+)\]''' , UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCAmelCase : Dict = _re_import_struct_key_value.search(UpperCAmelCase )
if single_line_import_search is not None:
UpperCAmelCase : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase : int = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCAmelCase : Dict = lines[line_index]
if _re_import_struct_add_one.search(UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCAmelCase ) is not None:
UpperCAmelCase : Optional[Any] = _re_import_struct_add_many.search(UpperCAmelCase ).groups()[0].split(''', ''' )
UpperCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif _re_between_brackets.search(UpperCAmelCase ) is not None:
UpperCAmelCase : str = _re_between_brackets.search(UpperCAmelCase ).groups()[0].split(''', ''' )
UpperCAmelCase : List[Any] = [obj[1:-1] for obj in imports if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif _re_quote_object.search(UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(UpperCAmelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase : Optional[int] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase : Union[str, Any] = []
while (
line_index < len(UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCAmelCase : Optional[int] = lines[line_index]
UpperCAmelCase : Tuple = _re_import.search(UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase : Optional[int] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCAmelCase : Optional[Any] = lines[line_index]
UpperCAmelCase : int = _re_import.search(UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> str:
def find_duplicates(UpperCAmelCase : Union[str, Any] ):
return [k for k, v in collections.Counter(UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase : List[Any] = []
for key in import_dict_objects.keys():
UpperCAmelCase : List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
UpperCAmelCase : List[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase : Optional[int] = '''base imports''' if key == '''none''' else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def a__ ( ) -> str:
UpperCAmelCase : Tuple = []
for root, _, files in os.walk(UpperCAmelCase ):
if "__init__.py" in files:
UpperCAmelCase : int = os.path.join(UpperCAmelCase , '''__init__.py''' )
UpperCAmelCase : List[Any] = parse_init(UpperCAmelCase )
if objects is not None:
UpperCAmelCase : Dict = analyze_results(*UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
UpperCAmelCase : Tuple = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(UpperCAmelCase ) )
if len(UpperCAmelCase ) > 0:
raise ValueError('''\n\n'''.join(UpperCAmelCase ) )
def a__ ( ) -> Tuple:
UpperCAmelCase : Dict = []
for path, directories, files in os.walk(UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCAmelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCAmelCase : Optional[Any] = str((Path(UpperCAmelCase ) / folder).relative_to(UpperCAmelCase ) )
UpperCAmelCase : Any = short_path.replace(os.path.sep , '''.''' )
submodules.append(UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase : Any = str((Path(UpperCAmelCase ) / fname).relative_to(UpperCAmelCase ) )
UpperCAmelCase : List[Any] = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(UpperCAmelCase )
return submodules
_lowerCamelCase : int = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def a__ ( ) -> Any:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
UpperCAmelCase : Optional[int] = direct_transformers_import(UpperCAmelCase )
UpperCAmelCase : Any = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to pick up all additions and
    # (potentially re-)add them.
with open(os.path.join(UpperCAmelCase , '''__init__.py''' ) , '''r''' ) as f:
UpperCAmelCase : Dict = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , UpperCAmelCase ) ) )
UpperCAmelCase : int = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(UpperCAmelCase ) > 0:
UpperCAmelCase : Tuple = '''\n'''.join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registed in the main init of Transformers:\n'''
f'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
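# For reference, a minimal sketch (not an actual Transformers init) of the
# dual-half layout these checks validate:
#
#     _import_structure = {"models.bert": ["BertConfig", "BertModel"]}
#     if TYPE_CHECKING:
#         from .models.bert import BertConfig, BertModel
#     else:
#         import sys
#         sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
#
# check_all_inits() verifies that the dict half and the TYPE_CHECKING half list
# the same objects; check_submodules() verifies that every submodule appears in
# the keys of `_import_structure`.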
| 336 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
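# Worked probe example (assumed values): searching item = 67 in
# [10, 30, 40, 45, 50, 66, 77, 93] with left = 0 and right = 7 gives
# point = 0 + ((67 - 10) * (7 - 0)) // (93 - 10) = 399 // 83 = 4, i.e. the probe
# lands where a uniformly distributed value of 67 would be expected to sit.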
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted(collection):
    if collection != sorted(collection ):
        raise ValueError('''Collection must be ascending sorted''' )
    return True
if __name__ == "__main__":
import sys
_lowerCamelCase : Optional[int] = 0
if debug == 1:
_lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCamelCase : List[Any] = 6_7
_lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    # Bi-directional Dijkstra: expand from ``source`` over ``graph_forward`` and from
    # ``destination`` over ``graph_backward`` (the reversed graph) until the frontiers meet.
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
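# Usage sketch with the graphs above: the shortest E -> F path is E -> G -> F
# with total weight 2 + 1 = 3 (the alternative E -> B -> C -> D -> F costs 4):
#
#   print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # -> 3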
| 336 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = '''backbone.''' if is_semantic else ''''''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers ):
        prefix = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : str = q_bias
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase : int = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
UpperCAmelCase : str = gamma_a
UpperCAmelCase : Dict = gamma_a
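# Note on the slicing above: the checkpoint stores query/key/value as a single
# fused qkv projection of shape (3 * hidden_size, hidden_size). Rows
# [:hidden_size] are the query weights, rows [hidden_size : 2 * hidden_size] the
# key weights, and rows [-hidden_size:] the value weights. Keys carry no bias in
# this BEiT-style attention, which is why only q_bias and v_bias are popped.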
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase : List[Any] = 1_024
UpperCAmelCase : Optional[Any] = 4_096
UpperCAmelCase : Any = 24
UpperCAmelCase : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 16
UpperCAmelCase : List[Any] = '''huggingface/label-files'''
UpperCAmelCase : Any = '''rvlcdip-id2label.json'''
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Dict = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = idalabel
UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
UpperCAmelCase : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase : str = encoding['''pixel_values''']
UpperCAmelCase : Any = model(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify logits
UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs )
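# Usage sketch: instantiating the deprecated class still works but warns.
#
#   from transformers import CLIPFeatureExtractor, CLIPImageProcessor
#   fe = CLIPFeatureExtractor()      # emits the FutureWarning above
#   ip = CLIPImageProcessor()        # preferred, warning-free replacement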
| 336 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def a__ ( UpperCAmelCase : np.ndarray , UpperCAmelCase : float , UpperCAmelCase : int = 16_000 ) -> Any:
UpperCAmelCase : Optional[Any] = int(round(sample_rate * max_length ) )
if len(UpperCAmelCase ) <= sample_length:
return wav
UpperCAmelCase : str = randint(0 , len(UpperCAmelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
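# Worked example (assumed values): with sample_rate=16_000 and max_length=2.0,
# sample_length = 32_000, so an 80_000-sample clip is cropped to a random
# 32_000-sample window, while a clip of 32_000 samples or fewer is returned as-is.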
@dataclass
class __UpperCAmelCase :
UpperCamelCase = field(default=lowerCamelCase__ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
UpperCamelCase = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
UpperCamelCase = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
UpperCamelCase = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
UpperCamelCase = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=2_0 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class __UpperCAmelCase :
UpperCamelCase = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
UpperCamelCase = field(
default=lowerCamelCase__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __magic_name__ ( self : Dict ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''', __A, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def a__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , UpperCAmelCase , UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase : Optional[int] = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase )
transformers.utils.logging.set_verbosity(UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase : List[Any] = DatasetDict()
UpperCAmelCase : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase : Dict = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase : str = feature_extractor.model_input_names[0]
def train_transforms(UpperCAmelCase : Union[str, Any] ):
UpperCAmelCase : int = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase : List[Any] = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(UpperCAmelCase )
UpperCAmelCase : Union[str, Any] = feature_extractor(UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase : Dict = {model_input_name: inputs.get(UpperCAmelCase )}
UpperCAmelCase : Union[str, Any] = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(UpperCAmelCase : Any ):
UpperCAmelCase : Optional[Any] = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase : Optional[int] = feature_extractor(UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase : str = {model_input_name: inputs.get(UpperCAmelCase )}
UpperCAmelCase : str = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase : int = raw_datasets['''train'''].features[data_args.label_column_name].names
UpperCAmelCase , UpperCAmelCase : Any = {}, {}
for i, label in enumerate(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = str(UpperCAmelCase )
UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
UpperCAmelCase : Dict = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(UpperCAmelCase : int ):
UpperCAmelCase : List[Any] = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=UpperCAmelCase , references=eval_pred.label_ids )
UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCAmelCase ) , labelaid=UpperCAmelCase , idalabel=UpperCAmelCase , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase : Any = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase : int = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(UpperCAmelCase , output_all_columns=UpperCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase : Dict = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(UpperCAmelCase , output_all_columns=UpperCAmelCase )
# Initialize our trainer
UpperCAmelCase : List[str] = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=UpperCAmelCase , tokenizer=UpperCAmelCase , )
# Training
if training_args.do_train:
UpperCAmelCase : List[str] = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase : Optional[int] = last_checkpoint
UpperCAmelCase : str = trainer.train(resume_from_checkpoint=UpperCAmelCase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase : str = trainer.evaluate()
trainer.log_metrics('''eval''' , UpperCAmelCase )
trainer.save_metrics('''eval''' , UpperCAmelCase )
# Write model card and (optionally) push to hub
UpperCAmelCase : Union[str, Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase )
else:
trainer.create_model_card(**UpperCAmelCase )
if __name__ == "__main__":
main()
| 336 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowerCamelCase : List[Any] = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def a__ ( UpperCAmelCase : List[str] ) -> Any:
with open(UpperCAmelCase , '''r''' ) as f:
UpperCAmelCase : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
super().__init__(**__A )
UpperCAmelCase : Tuple = load_vocab_file(__A )
UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Union[str, Any] = unk_token
UpperCAmelCase : Optional[Any] = cls_token
UpperCAmelCase : Optional[int] = pad_token
UpperCAmelCase : Optional[int] = mask_token
UpperCAmelCase : List[str] = eos_token
UpperCAmelCase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __magic_name__ ( self : Tuple, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : List[Any], __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
return text.split()
def __magic_name__ ( self : Optional[int], __A : Dict=False ):
return len(self._id_to_token )
def __magic_name__ ( self : int ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __magic_name__ ( self : Tuple, __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
mask += [0] * len(__A ) + [1]
return mask
def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__A, '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __magic_name__ ( self : Dict ):
return self.get_vocab_size(with_added_tokens=__A )
def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
return super()._add_tokens(__A, special_tokens=__A )
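# Minimal sketch of the vocabulary round-trip implemented above (toy vocab for
# illustration; the real vocab files ship with the facebook/esm2_* checkpoints):
#
#   tokens = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]
#   token_to_id = {tok: i for i, tok in enumerate(tokens)}
#   seq = "L A G".split()                                  # the tokenizer splits on whitespace
#   ids = [token_to_id.get(t, token_to_id["<unk>"]) for t in seq]   # -> [4, 5, 6]
#   # build_inputs_with_special_tokens then wraps this as [<cls>] + ids + [<eos>]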
| 336 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """bloom"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Tuple, __A : List[str]=2_5_0_8_8_0, __A : List[Any]=6_4, __A : int=2, __A : Any=8, __A : Tuple=1E-5, __A : Tuple=0.0_2, __A : Tuple=True, __A : Tuple=1, __A : Dict=2, __A : str=False, __A : Optional[int]=0.0, __A : int=0.0, __A : Optional[Any]=1, __A : Any=False, **__A : Tuple, ):
UpperCAmelCase : Tuple = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase : List[Any] = kwargs.pop('''n_embed''', __A )
UpperCAmelCase : Optional[int] = hidden_size if n_embed is None else n_embed
UpperCAmelCase : List[Any] = n_layer
UpperCAmelCase : str = n_head
UpperCAmelCase : List[Any] = layer_norm_epsilon
UpperCAmelCase : int = initializer_range
UpperCAmelCase : Optional[int] = use_cache
UpperCAmelCase : List[str] = pretraining_tp
UpperCAmelCase : List[Any] = apply_residual_connection_post_layernorm
UpperCAmelCase : Optional[Any] = hidden_dropout
UpperCAmelCase : Union[str, Any] = attention_dropout
UpperCAmelCase : Tuple = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
UpperCAmelCase : Optional[Any] = slow_but_exact
super().__init__(bos_token_id=__A, eos_token_id=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = version.parse("""1.12""" )
def __init__( self : Optional[Any], __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Optional[Any] = 0
@property
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__A, direction='''inputs''', inverted_values_shape=__A )
UpperCAmelCase : Dict = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Optional[Any] ):
return self._config.n_layer
@property
def __magic_name__ ( self : Tuple ):
return self._config.n_head
@property
def __magic_name__ ( self : Optional[int] ):
return 1E-3
def __magic_name__ ( self : Optional[int], __A : "PreTrainedTokenizer", __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional["TensorType"] = None, ):
UpperCAmelCase : str = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : List[str] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : Optional[Any] = seqlen + 2
UpperCAmelCase : int = self._config.hidden_size // self.num_attention_heads
UpperCAmelCase : List[str] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCAmelCase : List[str] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCAmelCase : Any = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Optional[int] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : str = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Optional[int] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Optional[Any] ):
return 1_3
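# Worked example (assumed toy sizes): with batch=2, n_head=8, hidden_size=64 and
# seq_len=5, generate_dummy_inputs above yields head_dim = 64 // 8 = 8 and
# past_key_values_length = 5 + 2 = 7, so every layer's past entry is a
# (key, value) pair of shapes (16, 8, 7) and (16, 7, 8); BLOOM stores keys as
# (batch * heads, head_dim, past_len) and values with the last two axes swapped.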
| 336 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
| 336 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = DiTPipeline
UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase = False
def __magic_name__ ( self : List[Any] ):
torch.manual_seed(0 )
UpperCAmelCase : Any = TransformeraDModel(
sample_size=1_6, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=__A, activation_fn='''gelu-approximate''', num_embeds_ada_norm=1_0_0_0, norm_type='''ada_norm_zero''', norm_elementwise_affine=__A, )
UpperCAmelCase : int = AutoencoderKL()
UpperCAmelCase : List[str] = DDIMScheduler()
UpperCAmelCase : str = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __magic_name__ ( self : Any, __A : Union[str, Any], __A : Any=0 ):
if str(__A ).startswith('''mps''' ):
UpperCAmelCase : Dict = torch.manual_seed(__A )
else:
UpperCAmelCase : Tuple = torch.Generator(device=__A ).manual_seed(__A )
UpperCAmelCase : Tuple = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__ ( self : Dict ):
UpperCAmelCase : str = '''cpu'''
UpperCAmelCase : str = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Tuple = self.get_dummy_inputs(__A )
UpperCAmelCase : List[str] = pipe(**__A ).images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 1_6, 1_6, 3) )
UpperCAmelCase : Any = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
UpperCAmelCase : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__A, 1E-3 )
def __magic_name__ ( self : Dict ):
self._test_inference_batch_single_identical(relax_max_difference=__A, expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def __magic_name__ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
UpperCAmelCase : Union[str, Any] = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
UpperCAmelCase : Tuple = pipe.get_label_ids(__A )
UpperCAmelCase : str = pipe(__A, generator=__A, num_inference_steps=4_0, output_type='''np''' ).images
for word, image in zip(__A, __A ):
UpperCAmelCase : Optional[int] = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __magic_name__ ( self : Any ):
UpperCAmelCase : int = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
UpperCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
UpperCAmelCase : List[Any] = ['''vase''', '''umbrella''']
UpperCAmelCase : Dict = pipe.get_label_ids(__A )
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(__A, generator=__A, num_inference_steps=2_5, output_type='''np''' ).images
for word, image in zip(__A, __A ):
UpperCAmelCase : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 336 |
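# Configuration (and ONNX export configuration) for Salesforce CodeGen checkpoints. The
# ONNX config adds dummy past_key_values tensors and extends the attention mask when
# `use_past` is enabled.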
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
        if not getattr(self._config, '''pad_token_id''', None ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
        UpperCAmelCase : Union[str, Any] = super(OnnxConfigWithPast, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
| 336 | 1 |
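# SageMaker integration test: launches a HuggingFace estimator with smdistributed model
# parallelism (8 processes per host, 4 pipeline partitions) and asserts the resulting
# runtime/accuracy/loss KPIs against the thresholds declared in `results`.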
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because it should only be run when releasing a minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Tuple ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(), encoding='''utf-8''', check=__A, )
assert hasattr(self, '''env''' )
def __magic_name__ ( self : Optional[Any], __A : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
UpperCAmelCase : Union[str, Any] = {
'''enabled''': True,
'''processes_per_host''': 8,
}
UpperCAmelCase : List[str] = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
UpperCAmelCase : int = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
UpperCAmelCase : List[Any] = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''', instance_count=__A, instance_type=self.instance_type, debugger_hook_config=__A, hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_0_0,
}, metric_definitions=self.env.metric_definitions, distribution=__A, py_version='''py36''', )
def __magic_name__ ( self : List[Any], __A : List[Any] ):
TrainingJobAnalytics(__A ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __magic_name__ ( self : Optional[Any], __A : Optional[Any] ):
# create estimator
UpperCAmelCase : Dict = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase : int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''', 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''', '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss}, __A )
| 336 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 1 |
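# Unit tests for the LeViT image processor: property checks plus batched and unbatched
# preprocessing of PIL images, NumPy arrays and PyTorch tensors.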
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[Any], __A : Dict, __A : Tuple=7, __A : Tuple=3, __A : List[str]=1_8, __A : str=3_0, __A : Any=4_0_0, __A : str=True, __A : List[str]=None, __A : Union[str, Any]=True, __A : int=None, __A : List[Any]=True, __A : Dict=[0.5, 0.5, 0.5], __A : Optional[Any]=[0.5, 0.5, 0.5], ):
UpperCAmelCase : int = size if size is not None else {'''shortest_edge''': 1_8}
UpperCAmelCase : Dict = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
UpperCAmelCase : Any = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : Optional[Any] = min_resolution
UpperCAmelCase : str = max_resolution
UpperCAmelCase : str = do_resize
UpperCAmelCase : Any = size
UpperCAmelCase : int = do_center_crop
UpperCAmelCase : Dict = crop_size
UpperCAmelCase : List[Any] = do_normalize
UpperCAmelCase : List[Any] = image_mean
UpperCAmelCase : Tuple = image_std
def __magic_name__ ( self : str ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LevitImageProcessor if is_vision_available() else None
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : str = LevitImageProcessingTester(self )
@property
def __magic_name__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A, '''image_mean''' ) )
self.assertTrue(hasattr(__A, '''image_std''' ) )
self.assertTrue(hasattr(__A, '''do_normalize''' ) )
self.assertTrue(hasattr(__A, '''do_resize''' ) )
self.assertTrue(hasattr(__A, '''do_center_crop''' ) )
self.assertTrue(hasattr(__A, '''size''' ) )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size, {'''height''': 1_8, '''width''': 1_8} )
UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size, {'''height''': 8_4, '''width''': 8_4} )
def __magic_name__ ( self : Optional[Any] ):
pass
def __magic_name__ ( self : Tuple ):
# Initialize image_processing
UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A, Image.Image )
# Test not batched input
UpperCAmelCase : List[Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : List[Any] = image_processing(__A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def __magic_name__ ( self : Union[str, Any] ):
# Initialize image_processing
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A, numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A, np.ndarray )
# Test not batched input
UpperCAmelCase : Any = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : List[Any] = image_processing(__A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def __magic_name__ ( self : Any ):
# Initialize image_processing
UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__A, torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A, torch.Tensor )
# Test not batched input
UpperCAmelCase : int = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : Any = image_processing(__A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 336 |
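# Flax implementation of the Karras VE (variance-expanding) scheduler: an immutable
# scheduler state, a stochastic noise-adding step, and first- and second-order updates.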
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
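        # Stochastic "churn" step (Karras et al., 2022): extra noise is added only while
        # sigma lies inside the configured [s_min, s_max] range; outside it gamma is 0.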
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
| 336 | 1 |
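# Tests for the TensorFlow benchmark utilities: inference and training benchmarks on tiny
# models, eager and XLA modes, CSV export, and line-by-line memory tracing.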
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Optional[Any], __A : Dict ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ):
UpperCAmelCase : Optional[Any] = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(__A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : str = '''sshleifer/tiny-gpt2'''
UpperCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], eager_mode=__A, multi_process=__A, )
UpperCAmelCase : Tuple = TensorFlowBenchmark(__A )
UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], multi_process=__A, only_pretrain_model=__A, )
UpperCAmelCase : List[str] = TensorFlowBenchmark(__A )
UpperCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Union[str, Any] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], multi_process=__A, )
UpperCAmelCase : Union[str, Any] = TensorFlowBenchmark(__A )
UpperCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Optional[int] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(__A )
UpperCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], eager_mode=__A, multi_process=__A, )
UpperCAmelCase : Tuple = TensorFlowBenchmark(__A, [config] )
UpperCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = '''sshleifer/tiny-gpt2'''
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__A )
UpperCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], multi_process=__A, )
UpperCAmelCase : Union[str, Any] = TensorFlowBenchmark(__A, [config] )
UpperCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : List[str] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], multi_process=__A, )
UpperCAmelCase : Optional[int] = TensorFlowBenchmark(__A )
UpperCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __magic_name__ ( self : str ):
UpperCAmelCase : Optional[int] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(__A )
UpperCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], multi_process=__A, )
UpperCAmelCase : int = TensorFlowBenchmark(__A, [config] )
UpperCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__A )
UpperCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], multi_process=__A, )
UpperCAmelCase : List[Any] = TensorFlowBenchmark(__A, configs=[config] )
UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0, '''Cannot do xla on CPU.''' )
def __magic_name__ ( self : Any ):
UpperCAmelCase : Optional[Any] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], training=__A, inference=__A, sequence_lengths=[8], batch_sizes=[1], use_xla=__A, multi_process=__A, )
UpperCAmelCase : str = TensorFlowBenchmark(__A )
UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[str] = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=__A, save_to_csv=__A, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(__A, '''inf_time.csv''' ), inference_memory_csv_file=os.path.join(__A, '''inf_mem.csv''' ), env_info_csv_file=os.path.join(__A, '''env.csv''' ), multi_process=__A, )
UpperCAmelCase : Optional[Any] = TensorFlowBenchmark(__A )
benchmark.run()
self.assertTrue(Path(os.path.join(__A, '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__A, '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__A, '''env.csv''' ) ).exists() )
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Union[str, Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(__A : int ):
self.assertTrue(hasattr(__A, '''sequential''' ) )
self.assertTrue(hasattr(__A, '''cumulative''' ) )
self.assertTrue(hasattr(__A, '''current''' ) )
self.assertTrue(hasattr(__A, '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID], inference=__A, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(__A, '''log.txt''' ), log_print=__A, trace_memory_line_by_line=__A, eager_mode=__A, multi_process=__A, )
UpperCAmelCase : Optional[Any] = TensorFlowBenchmark(__A )
UpperCAmelCase : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__A, '''log.txt''' ) ).exists() )
| 336 |
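# Cross-platform helpers to hide and show the terminal cursor: the Win32 console API via
# ctypes on Windows, ANSI escape codes on POSIX, plus a context-manager wrapper.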
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCAmelCase ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def a__ ( ) -> Dict:
if os.name == "nt":
UpperCAmelCase : List[str] = CursorInfo()
UpperCAmelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def a__ ( ) -> Optional[int]:
if os.name == "nt":
UpperCAmelCase : int = CursorInfo()
UpperCAmelCase : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Any = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def a__ ( ) -> Optional[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
| 336 | 1 |
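# Pytest fixtures for tests that talk to the Hugging Face Hub CI endpoint: endpoint and
# token monkeypatching, plus session-scoped private dataset repos (text and zipped data)
# that are created on demand and deleted afterwards.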
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_lowerCamelCase : List[Any] = "__DUMMY_TRANSFORMERS_USER__"
_lowerCamelCase : int = "Dummy User"
_lowerCamelCase : int = "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"
_lowerCamelCase : Tuple = "https://hub-ci.huggingface.co"
_lowerCamelCase : List[Any] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
_lowerCamelCase : int = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
_lowerCamelCase : List[str] = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def a__ ( UpperCAmelCase : Tuple ) -> str:
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , UpperCAmelCase )
@pytest.fixture
def a__ ( UpperCAmelCase : Tuple ) -> Optional[Any]:
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , UpperCAmelCase )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , UpperCAmelCase )
@pytest.fixture
def a__ ( UpperCAmelCase : Tuple ) -> Tuple:
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , UpperCAmelCase )
@pytest.fixture
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> str:
HfFolder.save_token(UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def a__ ( ) -> Optional[int]:
return HfApi(endpoint=UpperCAmelCase )
@pytest.fixture(scope='''session''' )
def a__ ( UpperCAmelCase : HfApi ) -> Dict:
UpperCAmelCase : str = HfFolder.get_token()
HfFolder.save_token(UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCAmelCase )
@pytest.fixture
def a__ ( UpperCAmelCase : Dict ) -> Any:
def _cleanup_repo(UpperCAmelCase : Any ):
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def a__ ( UpperCAmelCase : Union[str, Any] ) -> List[str]:
@contextmanager
def _temporary_repo(UpperCAmelCase : Union[str, Any] ):
try:
yield repo_id
finally:
cleanup_repo(UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def a__ ( UpperCAmelCase : HfApi , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : str = f'''repo_txt_data-{int(time.time() * 10E3 )}'''
UpperCAmelCase : Dict = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' , private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase , path_or_fileobj=str(UpperCAmelCase ) , path_in_repo='''data/text_data.txt''' , repo_id=UpperCAmelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] ) -> Optional[int]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def a__ ( UpperCAmelCase : HfApi , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] ) -> Any:
UpperCAmelCase : Dict = f'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
UpperCAmelCase : Optional[Any] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' , private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase , path_or_fileobj=str(UpperCAmelCase ) , path_in_repo='''data.zip''' , repo_id=UpperCAmelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Optional[int]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def a__ ( UpperCAmelCase : HfApi , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] ) -> List[Any]:
UpperCAmelCase : Optional[int] = f'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
UpperCAmelCase : Optional[int] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' , private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase , path_or_fileobj=str(UpperCAmelCase ) , path_in_repo='''data.zip''' , repo_id=UpperCAmelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase , token=UpperCAmelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] ) -> int:
return hf_private_dataset_repo_zipped_img_data_
| 336 |
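# Lazy import structure for the EnCodec model: the config and feature extractor are always
# importable, while the PyTorch modeling classes are only exposed when torch is available.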
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 1 |
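# Auto-generated dummy objects: placeholder classes that raise an informative error when
# Flax-backed pipelines/schedulers are used without Flax installed.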
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Optional[Any], *__A : List[Any], **__A : Union[str, Any] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : List[Any], *__A : int, **__A : List[Any] ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Tuple, *__A : str, **__A : Tuple ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Any, *__A : Union[str, Any], **__A : Optional[int] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Dict, *__A : Any, **__A : int ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Any, *__A : Union[str, Any], **__A : str ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Tuple, *__A : Union[str, Any], **__A : Any ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : str, *__A : Union[str, Any], **__A : List[Any] ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Optional[int], *__A : List[Any], **__A : Optional[int] ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Dict, *__A : Any, **__A : Optional[int] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : str, *__A : Tuple, **__A : List[str] ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Dict, *__A : Any, **__A : Tuple ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Optional[Any], *__A : int, **__A : List[str] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Optional[int], *__A : Tuple, **__A : int ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Tuple, *__A : str, **__A : Any ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Optional[Any], *__A : Optional[Any], **__A : List[str] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Any, *__A : Union[str, Any], **__A : Union[str, Any] ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Optional[int], *__A : Optional[Any], **__A : Any ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Tuple, *__A : int, **__A : Optional[int] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : List[Any], *__A : str, **__A : Optional[int] ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : int, *__A : List[str], **__A : Optional[int] ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Optional[int], *__A : str, **__A : int ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : str, *__A : Optional[Any], **__A : Any ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Optional[Any], *__A : List[str], **__A : Any ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : str, *__A : List[str], **__A : int ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Tuple, *__A : Tuple, **__A : Any ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Optional[Any], *__A : Union[str, Any], **__A : Union[str, Any] ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : str, *__A : Union[str, Any], **__A : Optional[int] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : str, *__A : Union[str, Any], **__A : Any ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : List[Any], *__A : Tuple, **__A : Tuple ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Tuple, *__A : Union[str, Any], **__A : int ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Dict, *__A : Union[str, Any], **__A : int ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Optional[int], *__A : List[str], **__A : Tuple ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Tuple, *__A : Dict, **__A : Union[str, Any] ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Dict, *__A : List[str], **__A : Tuple ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Optional[Any], *__A : int, **__A : int ):
requires_backends(cls, ['''flax'''] )
class __UpperCAmelCase ( metaclass=lowerCamelCase__ ):
UpperCamelCase = ["""flax"""]
def __init__( self : Optional[int], *__A : int, **__A : Any ):
requires_backends(self, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Tuple, *__A : int, **__A : Union[str, Any] ):
requires_backends(cls, ['''flax'''] )
@classmethod
def __magic_name__ ( cls : Dict, *__A : Any, **__A : Dict ):
requires_backends(cls, ['''flax'''] )
| 336 |
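# Splits a byte range into (roughly) equal partitions, e.g. for parallel downloads; the
# last partition absorbs any remainder. Example: 100 bytes over 4 partitions gives
# ['1-25', '26-50', '51-75', '76-100'].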
from __future__ import annotations
def a__ ( number_of_bytes : int , partitions : int ) -> list[str]:
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not be greater than number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
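# A SentencePiece-style Unigram tokenizer built on the `tokenizers` library: NMT/NFKC/
# lowercase normalization, Metaspace/digit/punctuation pre-tokenization, and training
# from either file lists or Python iterators.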
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : str, __A : str = "▁", __A : bool = True, __A : Union[str, AddedToken] = "<unk>", __A : Union[str, AddedToken] = "</s>", __A : Union[str, AddedToken] = "<pad>", ):
UpperCAmelCase : Optional[int] = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
UpperCAmelCase : int = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCAmelCase : Optional[Any] = token_dict['''token''']
UpperCAmelCase : Any = Tokenizer(Unigram() )
UpperCAmelCase : Dict = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ), ''' ''' ),
normalizers.Lowercase(),
] )
UpperCAmelCase : Dict = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__A, add_prefix_space=__A ),
pre_tokenizers.Digits(individual_digits=__A ),
pre_tokenizers.Punctuation(),
] )
UpperCAmelCase : Optional[Any] = decoders.Metaspace(replacement=__A, add_prefix_space=__A )
UpperCAmelCase : Dict = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])], )
UpperCAmelCase : Optional[int] = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(__A, __A )
def __magic_name__ ( self : Any, __A : Union[str, List[str]], __A : int = 8_0_0_0, __A : bool = True, ):
UpperCAmelCase : Any = trainers.UnigramTrainer(
vocab_size=__A, special_tokens=self.special_tokens_list, show_progress=__A, )
if isinstance(__A, __A ):
UpperCAmelCase : str = [files]
self._tokenizer.train(__A, trainer=__A )
self.add_unk_id()
def __magic_name__ ( self : Any, __A : Union[Iterator[str], Iterator[Iterator[str]]], __A : int = 8_0_0_0, __A : bool = True, ):
UpperCAmelCase : str = trainers.UnigramTrainer(
vocab_size=__A, special_tokens=self.special_tokens_list, show_progress=__A, )
self._tokenizer.train_from_iterator(__A, trainer=__A )
self.add_unk_id()
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[int] = json.loads(self._tokenizer.to_str() )
UpperCAmelCase : int = self.special_tokens['''unk''']['''id''']
UpperCAmelCase : Any = Tokenizer.from_str(json.dumps(__A ) )
| 336 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
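# `accelerate tpu-config`: assembles the startup commands (optionally installing a given
# accelerate version) and runs them on every worker of a TPU pod via
# `gcloud compute tpus tpu-vm ssh`.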
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
    print('''Successfully set up pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 | 1 |
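# Flax MT5 models: thin subclasses of the Flax T5 classes that only override the model
# type and config class, preceded by the standard decoder-input shifting helper.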
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = "T5Config"
def a__ ( input_ids : jnp.ndarray , pad_token_id : int , decoder_start_token_id : int ) -> jnp.ndarray:
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
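# Example: shifting [[5, 6, 7]] right with decoder_start_token_id=0 yields [[0, 5, 6]];
# -100 label positions are replaced by pad_token_id.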
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """mt5"""
UpperCamelCase = MTaConfig
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """mt5"""
UpperCamelCase = MTaConfig
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """mt5"""
UpperCamelCase = MTaConfig
| 336 |
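# Conversion script for MobileViTV2 checkpoints: loads the original YAML config, builds a
# matching HF config for classification or segmentation tasks, and renames the original
# state-dict keys to the transformers layout.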
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( dct : Dict , old : Union[str, Any] , new : Optional[int] ) -> None:
    val = dct.pop(old )
    dct[new] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
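# For illustration (added; one assumed example of the table built above): with
# base_model=False, so that the "mobilevitv2." prefix applies, the rules compose as
#
#     ("conv_1.block.conv.weight", "mobilevitv2.conv_stem.convolution.weight")
#
# which is the (old, new) pair format consumed by the rename helper defined earlier.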
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
    # remove and rename some keys of the loaded original model
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 336 | 1 |
import re
from filelock import FileLock
try:
import nltk
_lowerCamelCase : Union[str, Any] = True
except (ImportError, ModuleNotFoundError):
_lowerCamelCase : Dict = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def a__ ( UpperCAmelCase : str ) -> str:
    UpperCAmelCase : str = re.sub('''<n>''' , '''''' , UpperCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCAmelCase ) )
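# Usage sketch (illustrative, added): the splitter above re-joins NLTK sentence
# boundaries with newlines, e.g.
#
#     >>> a__("Pegasus is great. It summarizes well.")
#     'Pegasus is great.\nIt summarizes well.'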
| 336 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
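# Illustrative examples (added; mirrors distutils.util.strtobool semantics, with the
# converter above referred to as ``strtobool`` purely for readability):
#
#     strtobool("YES")   ->  1
#     strtobool("off")   ->  0
#     strtobool("maybe") ->  ValueError: invalid truth value 'maybe'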
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
    if isinstance(UpperCAmelCase , str ):
        if hasattr(torch , UpperCAmelCase ):
            UpperCAmelCase : Union[str, Any] = getattr(torch , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
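# Quick sketch of the two converters above (illustrative, added; descriptive names
# ``to_py_obj``/``to_numpy`` are used for readability): both walk nested containers
# and normalize the leaves, so that, assuming torch is installed,
#
#     to_py_obj({"a": torch.tensor([1, 2])})  ->  {"a": [1, 2]}
#     to_numpy({"a": torch.tensor([1, 2])})   ->  {"a": array([1, 2])}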
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
        first_field = getattr(self, class_fields[0].name )
        other_fields_are_none = all(getattr(self, field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field, dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element, (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0], str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self, element[0], element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name )
                if v is not None:
                    self[field.name] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
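# Minimal usage sketch (added; assumes the class above plays the role of the usual
# ``ModelOutput`` base and that ``dataclasses.dataclass`` is in scope):
#
#     @dataclass
#     class ToyOutput(ModelOutput):
#         logits: Any = None
#         loss: Any = None
#
#     out = ToyOutput(logits=[1.0])
#     out["logits"], out.logits, out[0]   # dict-, attribute- and index-style access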
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from _flatten_dict(v , key , delimiter )
            else:
                yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
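# Flattening behaviour in one example (illustrative, added):
#
#     {"a": 1, "b": {"c": 2}}   ->   {"a": 1, "b.c": 2}
#
# with the delimiter argument controlling the "." separator between nested keys.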
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
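# Typical use (illustrative, added; ``working_or_temp_dir`` names the context
# manager defined above, whose second argument is the ``use_temp_dir`` flag):
#
#     with working_or_temp_dir("/path/to/repo", True) as work_dir:
#         ...  # work_dir is a fresh temporary directory, cleaned up on exit
#
# With the flag left False, the provided working directory is yielded unchanged.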
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
        raise ValueError(f'''Type not supported for tensor_size: {type(UpperCAmelCase )}.''' )
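# The framework-agnostic helpers above in one illustrative pass (numpy case only;
# descriptive names are used for readability, the file defines each under ``a__``):
#
#     x = np.zeros((2, 3))
#     transpose(x).shape               ->  (3, 2)
#     squeeze(np.zeros((1, 3))).shape  ->  (3,)
#     expand_dims(x, 0).shape          ->  (1, 2, 3)
#     tensor_size(x)                   ->  6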
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCAmelCase :
@staticmethod
def __magic_name__ ( *__A : Union[str, Any], **__A : Any ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def __magic_name__ ( self : int, __A : Optional[int], __A : str, __A : List[Any] ):
UpperCAmelCase : Tuple = pipeline('''visual-question-answering''', model='''hf-internal-testing/tiny-vilt-random-vqa''' )
UpperCAmelCase : int = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def __magic_name__ ( self : Any, __A : int, __A : Union[str, Any] ):
UpperCAmelCase : List[str] = vqa_pipeline(__A, top_k=1 )
self.assertEqual(
__A, [
[{'''score''': ANY(__A ), '''answer''': ANY(__A )}],
[{'''score''': ANY(__A ), '''answer''': ANY(__A )}],
], )
@require_torch
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : str = pipeline('''visual-question-answering''', model='''hf-internal-testing/tiny-vilt-random-vqa''' )
UpperCAmelCase : Tuple = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase : Any = '''How many cats are there?'''
        UpperCAmelCase : int = vqa_pipeline(image=image, question='''How many cats are there?''', top_k=2 )
self.assertEqual(
__A, [{'''score''': ANY(__A ), '''answer''': ANY(__A )}, {'''score''': ANY(__A ), '''answer''': ANY(__A )}] )
UpperCAmelCase : Union[str, Any] = vqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(
__A, [{'''score''': ANY(__A ), '''answer''': ANY(__A )}, {'''score''': ANY(__A ), '''answer''': ANY(__A )}] )
@slow
@require_torch
def __magic_name__ ( self : str ):
UpperCAmelCase : Optional[int] = pipeline('''visual-question-answering''', model='''dandelin/vilt-b32-finetuned-vqa''' )
UpperCAmelCase : List[str] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase : Dict = '''How many cats are there?'''
        UpperCAmelCase : Optional[int] = vqa_pipeline(image=image, question=question, top_k=2 )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] )
UpperCAmelCase : Optional[Any] = vqa_pipeline({'''image''': image, '''question''': question}, top_k=2 )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] )
UpperCAmelCase : List[Any] = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2 )
self.assertEqual(
nested_simplify(__A, decimals=4 ), [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2, )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def __magic_name__ ( self : Optional[int] ):
pass
| 336 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
| 336 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase__ )
class __UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
@dataclass(frozen=lowerCamelCase__ )
class __UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
def __init__( self : int, __A : str, __A : PreTrainedTokenizer, __A : str, __A : Optional[int] = None, __A : Optional[Any]=False, __A : bool = False, ):
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : List[str] = os.path.join(
__A, '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''', tokenizer.__class__.__name__, str(__A ), __A, ), )
UpperCAmelCase : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : List[Any] = label_list[2], label_list[1]
UpperCAmelCase : str = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : List[str] = cached_features_file + '''.lock'''
with FileLock(__A ):
if os.path.exists(__A ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
UpperCAmelCase : List[Any] = torch.load(__A )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
UpperCAmelCase : List[str] = (
processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A )
)
logger.info('''Training examples: %s''', len(__A ) )
UpperCAmelCase : int = hans_convert_examples_to_features(__A, __A, __A, __A )
logger.info('''Saving features into cached file %s''', __A )
torch.save(self.features, __A )
def __len__( self : Optional[int] ):
return len(self.features )
def __getitem__( self : Union[str, Any], __A : List[Any] ):
return self.features[i]
def __magic_name__ ( self : Optional[Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __UpperCAmelCase :
UpperCamelCase = 42
def __init__( self : str, __A : str, __A : PreTrainedTokenizer, __A : str, __A : Optional[int] = 1_2_8, __A : List[Any]=False, __A : bool = False, ):
UpperCAmelCase : Tuple = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Dict = label_list[2], label_list[1]
UpperCAmelCase : Tuple = label_list
UpperCAmelCase : Union[str, Any] = processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A )
UpperCAmelCase : List[Any] = hans_convert_examples_to_features(__A, __A, __A, __A )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ), desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__A )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : str = tf.data.Dataset.from_generator(
__A, (
{
                    '''example_id''': tf.int32,
                    '''input_ids''': tf.int32,
                    '''attention_mask''': tf.int32,
                    '''token_type_ids''': tf.int32,
                },
                tf.int64,
), (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
), )
def __magic_name__ ( self : Union[str, Any] ):
return self.dataset
def __len__( self : Optional[int] ):
return len(self.features )
def __getitem__( self : Tuple, __A : Any ):
return self.features[i]
def __magic_name__ ( self : Dict ):
return self.label_list
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Dict, __A : str ):
return self._create_examples(self._read_tsv(os.path.join(__A, '''heuristics_train_set.txt''' ) ), '''train''' )
def __magic_name__ ( self : Dict, __A : Optional[int] ):
return self._create_examples(self._read_tsv(os.path.join(__A, '''heuristics_evaluation_set.txt''' ) ), '''dev''' )
def __magic_name__ ( self : int ):
return ["contradiction", "entailment", "neutral"]
def __magic_name__ ( self : Tuple, __A : Union[str, Any], __A : Union[str, Any] ):
UpperCAmelCase : str = []
for i, line in enumerate(__A ):
if i == 0:
continue
UpperCAmelCase : List[str] = '''%s-%s''' % (set_type, line[0])
UpperCAmelCase : str = line[5]
UpperCAmelCase : Optional[Any] = line[6]
UpperCAmelCase : Tuple = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
UpperCAmelCase : Dict = line[0]
examples.append(InputExample(guid=__A, text_a=__A, text_b=__A, label=__A, pairID=__A ) )
return examples
def a__ ( UpperCAmelCase : List[InputExample] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : PreTrainedTokenizer , ) -> List[str]:
UpperCAmelCase : str = {label: i for i, label in enumerate(UpperCAmelCase )}
UpperCAmelCase : List[str] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
UpperCAmelCase : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , truncation=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , )
UpperCAmelCase : List[Any] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Dict = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
_lowerCamelCase : List[str] = {
"hans": 3,
}
_lowerCamelCase : int = {
"hans": HansProcessor,
}
| 336 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
            if model_class.__name__ == "TFViTMAEModel":
                out_before = outputs.last_hidden_state.numpy()
            else:
                out_before = outputs.logits.numpy()
            # zero out any NaNs so the comparison below is well-defined
            out_before[np.isnan(out_before )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__A, saved_model=__A )
                UpperCAmelCase : Dict = model_class.from_pretrained(__A )
                UpperCAmelCase : str = model(__A, noise=__A )
                if model_class.__name__ == "TFViTMAEModel":
                    out_after = after_outputs['''last_hidden_state'''].numpy()
                else:
                    out_after = after_outputs['''logits'''].numpy()
                out_after[np.isnan(out_after )] = 0
            UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_before - out_after ) )
            self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
class __UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = None
@staticmethod
def __magic_name__ ( ):
raise NotImplementedError
def __magic_name__ ( self : Union[str, Any], __A : Union[str, Any], __A : int, __A : str, **__A : str ):
raise NotImplementedError
def __magic_name__ ( self : str, __A : Dict ):
raise NotImplementedError
def __magic_name__ ( self : Optional[int] ):
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def __magic_name__ ( cls : str ):
return F'''`pip install {cls.pip_package or cls.name}`'''
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """optuna"""
@staticmethod
def __magic_name__ ( ):
return is_optuna_available()
def __magic_name__ ( self : Optional[Any], __A : Dict, __A : int, __A : str, **__A : Any ):
return run_hp_search_optuna(__A, __A, __A, **__A )
def __magic_name__ ( self : List[Any], __A : Dict ):
return default_hp_space_optuna(__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """ray"""
UpperCamelCase = """'ray[tune]'"""
@staticmethod
def __magic_name__ ( ):
return is_ray_available()
def __magic_name__ ( self : Union[str, Any], __A : int, __A : int, __A : str, **__A : List[Any] ):
return run_hp_search_ray(__A, __A, __A, **__A )
def __magic_name__ ( self : Any, __A : List[str] ):
return default_hp_space_ray(__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """sigopt"""
@staticmethod
def __magic_name__ ( ):
return is_sigopt_available()
def __magic_name__ ( self : str, __A : int, __A : int, __A : str, **__A : int ):
return run_hp_search_sigopt(__A, __A, __A, **__A )
def __magic_name__ ( self : List[str], __A : Optional[Any] ):
return default_hp_space_sigopt(__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """wandb"""
@staticmethod
def __magic_name__ ( ):
return is_wandb_available()
def __magic_name__ ( self : Union[str, Any], __A : int, __A : int, __A : str, **__A : Optional[Any] ):
return run_hp_search_wandb(__A, __A, __A, **__A )
def __magic_name__ ( self : Any, __A : Tuple ):
return default_hp_space_wandb(__A )
_lowerCamelCase : Any = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a__ ( ) -> str:
UpperCAmelCase : Union[str, Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(UpperCAmelCase ) > 0:
UpperCAmelCase : List[str] = available_backends[0].name
if len(UpperCAmelCase ) > 1:
logger.info(
f'''{len(UpperCAmelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
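# Illustrative selection flow (added): with only optuna installed the helper above
# returns "optuna"; with optuna and ray both installed it still returns "optuna"
# (the first available backend) and logs that 2 backends were found.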
| 336 |
def partition(m : int ) -> int:
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
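# Sanity check (added; the memoized recurrence above counts integer partitions):
#
#     >>> partition(5)
#     7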
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 1 |
def a__ ( UpperCAmelCase : str ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
from __future__ import annotations
def a__ ( maze : list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved
def run_maze(maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
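# Illustrative run (added): 0 marks a free cell and 1 a wall; the solver prints the
# visited-path matrix and returns whether the bottom-right corner is reachable.
#
#     >>> a__([[0, 1], [0, 0]])
#     [1, 0]
#     [1, 1]
#     True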
| 336 | 1 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowerCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __UpperCAmelCase ( datasets.BuilderConfig ):
UpperCamelCase = None
def a__ ( UpperCAmelCase : "pyspark.sql.DataFrame" , UpperCAmelCase : List[int] , ) -> Optional[int]:
import pyspark
def generate_fn():
UpperCAmelCase : Any = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
UpperCAmelCase : Optional[Any] = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' )
UpperCAmelCase : Optional[Any] = partition_df.collect()
UpperCAmelCase : Optional[int] = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
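# Shape of the generated stream (illustrative, added): for partition_order=[1, 0]
# the generator yields ("1_0", row), ("1_1", row), ... then ("0_0", row), ... i.e.
# keys are "<partition_id>_<row_id>" strings and values are plain dict rows, with
# row_id restarting at 0 for each partition.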
class __UpperCAmelCase ( _BaseExamplesIterable ):
def __init__( self : Union[str, Any], __A : "pyspark.sql.DataFrame", __A : List[str]=None, ):
UpperCAmelCase : List[Any] = df
UpperCAmelCase : Optional[int] = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCAmelCase : str = _generate_iterable_examples(self.df, self.partition_order )
def __iter__( self : Dict ):
yield from self.generate_examples_fn()
def __magic_name__ ( self : Union[str, Any], __A : np.random.Generator ):
UpperCAmelCase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__A )
return SparkExamplesIterable(self.df, partition_order=__A )
def __magic_name__ ( self : List[str], __A : int, __A : int ):
UpperCAmelCase : Any = self.split_shard_indices_by_worker(__A, __A )
return SparkExamplesIterable(self.df, partition_order=__A )
@property
def __magic_name__ ( self : Union[str, Any] ):
return len(self.partition_order )
class __UpperCAmelCase ( datasets.DatasetBuilder ):
UpperCamelCase = SparkConfig
def __init__( self : Any, __A : "pyspark.sql.DataFrame", __A : str = None, __A : str = None, **__A : Dict, ):
import pyspark
UpperCAmelCase : List[str] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCAmelCase : Optional[Any] = df
UpperCAmelCase : Tuple = working_dir
super().__init__(
cache_dir=__A, config_name=str(self.df.semanticHash() ), **__A, )
def __magic_name__ ( self : Optional[Any] ):
# Returns the path of the created file.
def create_cache_and_write_probe(__A : int ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=__A )
            UpperCAmelCase : str = os.path.join(self._cache_dir, '''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__A, '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''', '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCAmelCase : Tuple = (
self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(__A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def __magic_name__ ( self : Dict ):
return datasets.DatasetInfo(features=self.config.features )
def __magic_name__ ( self : str, __A : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __magic_name__ ( self : int, __A : Tuple ):
import pyspark
def get_arrow_batch_size(__A : List[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
UpperCAmelCase : List[str] = self.df.count()
UpperCAmelCase : Optional[int] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase : List[str] = (
self.df.limit(__A )
.repartition(1 )
.mapInArrow(__A, '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase : Optional[int] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase : Tuple = min(__A, int(approx_total_size / max_shard_size ) )
UpperCAmelCase : Union[str, Any] = self.df.repartition(__A )
def __magic_name__ ( self : Optional[Any], __A : str, __A : str, __A : int, ):
import pyspark
UpperCAmelCase : Tuple = ParquetWriter if file_format == '''parquet''' else ArrowWriter
UpperCAmelCase : Dict = os.path.join(self._working_dir, os.path.basename(__A ) ) if self._working_dir else fpath
UpperCAmelCase : Optional[int] = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase : str = self.config.features
UpperCAmelCase : Tuple = self._writer_batch_size
UpperCAmelCase : int = self._fs.storage_options
def write_arrow(__A : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase : Any = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase : Tuple = next(__A, __A )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Optional[Any] = writer_class(
features=__A, path=working_fpath.replace('''SSSSS''', F'''{shard_id:05d}''' ).replace('''TTTTT''', F'''{task_id:05d}''' ), writer_batch_size=__A, storage_options=__A, embed_local_files=__A, )
UpperCAmelCase : int = pa.Table.from_batches([first_batch] )
writer.write_table(__A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase , UpperCAmelCase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )
shard_id += 1
UpperCAmelCase : int = writer_class(
features=writer._features, path=working_fpath.replace('''SSSSS''', F'''{shard_id:05d}''' ).replace('''TTTTT''', F'''{task_id:05d}''' ), writer_batch_size=__A, storage_options=__A, embed_local_files=__A, )
UpperCAmelCase : List[str] = pa.Table.from_batches([batch] )
writer.write_table(__A )
if writer._num_bytes > 0:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__A ) ):
UpperCAmelCase : Any = os.path.join(os.path.dirname(__A ), os.path.basename(__A ) )
shutil.move(__A, __A )
UpperCAmelCase : str = (
self.df.mapInArrow(__A, '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ), pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ), pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ), pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __magic_name__ ( self : Union[str, Any], __A : "datasets.SplitGenerator", __A : str = "arrow", __A : Optional[Union[str, int]] = None, __A : Optional[int] = None, **__A : List[str], ):
self._validate_cache_dir()
UpperCAmelCase : List[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__A )
UpperCAmelCase : Dict = not is_remote_filesystem(self._fs )
UpperCAmelCase : Any = os.path.join if is_local else posixpath.join
UpperCAmelCase : int = '''-TTTTT-SSSSS-of-NNNNN'''
UpperCAmelCase : Optional[Any] = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
UpperCAmelCase : Tuple = path_join(self._output_dir, __A )
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : Any = 0
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : int = []
UpperCAmelCase : Union[str, Any] = []
for task_id, content in self._prepare_split_single(__A, __A, __A ):
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : str = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__A )
UpperCAmelCase : List[str] = total_num_examples
UpperCAmelCase : int = total_num_bytes
# should rename everything at the end
logger.debug(F'''Renaming {total_shards} shards.''' )
if total_shards > 1:
UpperCAmelCase : Dict = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__A : int, __A : int, __A : int, ):
rename(
__A, fpath.replace('''SSSSS''', F'''{shard_id:05d}''' ).replace('''TTTTT''', F'''{task_id:05d}''' ), fpath.replace('''TTTTT-SSSSS''', F'''{global_shard_id:05d}''' ).replace('''NNNNN''', F'''{total_shards:05d}''' ), )
UpperCAmelCase : Any = []
UpperCAmelCase : Tuple = 0
for i in range(len(__A ) ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(__A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__A, len(__A ) ).map(lambda __A : _rename_shard(*__A ) ).collect()
else:
# don't use any pattern
UpperCAmelCase : str = 0
UpperCAmelCase : int = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''', F'''{shard_id:05d}''' ).replace('''TTTTT''', F'''{task_id:05d}''' ), fpath.replace(__A, '''''' ), )
def __magic_name__ ( self : Optional[Any], __A : "datasets.SplitGenerator", ):
return SparkExamplesIterable(self.df )
| 336 |
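This builder is what backs the Dataset.from_spark entry point in datasets. A typical call, sketched under the assumption of a local Spark session; the column names and values are invented:

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])

# Materializes the Spark DataFrame into an Arrow-backed Hugging Face dataset.
ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'hello', 'label': 0}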
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''' )
| 336 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 336 |
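To illustrate the filtering, here is the same regex applied to a few invented paths; only .py files under the requested top-level directories survive:

import re

joined_dirs = "|".join(["utils", "src", "tests"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

candidates = [
    "src/transformers/models/bert/modeling_bert.py",
    "docs/index.md",
    "tests/test_tokenization_bert.py",
]
print([path for path in candidates if regex.match(path)])
# ['src/transformers/models/bert/modeling_bert.py', 'tests/test_tokenization_bert.py']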
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
| 336 | 1 |
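A quick deterministic check of both implementations above; the input values are chosen by hand:

print(triplet_sum1([13, 29, 7, 23, 5], 35))  # (5, 7, 23)
print(triplet_sum2([13, 29, 7, 23, 5], 35))  # (5, 7, 23)
print(triplet_sum2([1, 2, 3], 100))          # (0, 0, 0) -> no triplet reaches the target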
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc: list) -> list:
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )
def check_model_doc(overwrite: bool = False):
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 336 |
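A toy run of clean_model_doc_toc on hand-written entries, showing the dedup-then-sort behavior:

toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
print(clean_model_doc_toc(toc))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]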
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        raise NotImplementedError()
    def end(self):
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''')
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n'''):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''') + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)
    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
    def __iter__(self):
        return self
    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 336 | 1 |
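The iterator variant above is normally consumed from a second thread while generate() runs; the usual pattern looks like this (the model choice is illustrative):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
# generate() blocks, so it runs on a worker thread while the main thread drains the queue.
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
for text_chunk in streamer:
    print(text_chunk, end="", flush=True)
thread.join()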
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(data_input_tuple) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(('''Number of iterations:''', j) )
def test_gradient_descent():
    for i in range(len(test_data) ):
        print(('''Actual output value:''', output(i, '''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(i, '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 | 1 |
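The same batch update, vectorized with numpy for comparison; this is an illustrative sketch, not the API of the file above:

import numpy as np

# One feature plus a bias column of ones; the data fits y = 2x + 1 exactly.
X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
y = np.array([1.0, 3.0, 5.0, 7.0])
theta = np.zeros(2)
learning_rate = 0.1

for _ in range(5_000):
    gradient = X.T @ (X @ theta - y) / len(y)  # gradient of the (halved) mean squared error
    theta -= learning_rate * gradient

print(theta)  # ~[1.0, 2.0]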
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right )
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''' )
    return True
if __name__ == "__main__":
    import sys
    debug = 0
    # the collection must exist even when the debug assertion below is skipped
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
| 336 | 1 |
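The probe formula worked by hand on the module's sample collection; the estimate lands near where a uniformly distributed key would sit:

collection = [10, 30, 40, 45, 50, 66, 77, 93]
item = 66
left, right = 0, len(collection) - 1
point = left + ((item - collection[left]) * (right - left)) // (collection[right] - collection[left])
print(point, collection[point])  # 4 50 -> 50 < 66, so the search continues right of index 4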
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(data_input_tuple) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(('''Number of iterations:''', j) )
def test_gradient_descent():
    for i in range(len(test_data) ):
        print(('''Actual output value:''', output(i, '''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(i, '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 336 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[int] = '''backbone.''' if is_semantic else ''''''
UpperCAmelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str=False , UpperCAmelCase : Dict=False ) -> Any:
for i in range(config.num_hidden_layers ):
UpperCAmelCase : Tuple = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
UpperCAmelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase : str = q_bias
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : str = val
def a__ ( ) -> Optional[int]:
UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any]=False ) -> Union[str, Any]:
UpperCAmelCase : Dict = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase : Any = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase : List[Any] = 1_024
UpperCAmelCase : Optional[Any] = 4_096
UpperCAmelCase : Any = 24
UpperCAmelCase : Union[str, Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = '''huggingface/label-files'''
        filename = '''rvlcdip-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : List[str] = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
UpperCAmelCase : Tuple = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
UpperCAmelCase : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase : str = encoding['''pixel_values''']
UpperCAmelCase : Any = model(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify logits
UpperCAmelCase : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase : List[Any] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase : Any = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : Optional[int], __A : Any=1_3, __A : str=7, __A : Optional[int]=True, __A : Tuple=True, __A : Union[str, Any]=True, __A : Any=True, __A : Optional[int]=9_9, __A : Tuple=3_2, __A : str=5, __A : Union[str, Any]=4, __A : List[str]=3_7, __A : Tuple="gelu", __A : Optional[int]=0.1, __A : int=0.1, __A : Optional[Any]=5_1_2, __A : int=1_6, __A : Optional[Any]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=4, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : str = use_attention_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = num_choices
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_attention_mask:
UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__A, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self : int ):
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = config_and_inputs
UpperCAmelCase : Any = True
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Dict = FlaxRobertaModelTester(self )
@slow
def __magic_name__ ( self : Any ):
for model_class_name in self.all_model_classes:
UpperCAmelCase : Dict = model_class_name.from_pretrained('''roberta-base''', from_pt=__A )
UpperCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 336 | 1 |
from math import factorial
DIGIT_FACTORIAL : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum( number : int ) -> int:
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Convert the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution( chain_length : int = 60 , number_limit : int = 1_000_000 ) -> int:
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('''Parameters chain_length and number_limit must be int''' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths : dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
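# A quick sanity check of the helper above: 145 is the classic fixed point
# (1! + 4! + 5! = 1 + 24 + 120 = 145) and 169 maps to 1! + 6! + 9! = 363601.
assert digit_factorial_sum(145 ) == 145
assert digit_factorial_sum(169 ) == 363_601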
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
| 336 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.txt"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def load_vocab_file( vocab_file : str ) -> List[str]:
    with open(vocab_file , '''r''' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, __A : Dict, __A : List[Any]="<unk>", __A : List[str]="<cls>", __A : Any="<pad>", __A : Union[str, Any]="<mask>", __A : int="<eos>", **__A : Tuple, ):
super().__init__(**__A )
UpperCAmelCase : Tuple = load_vocab_file(__A )
UpperCAmelCase : List[Any] = dict(enumerate(self.all_tokens ) )
UpperCAmelCase : str = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase : Union[str, Any] = unk_token
UpperCAmelCase : Optional[Any] = cls_token
UpperCAmelCase : Optional[int] = pad_token
UpperCAmelCase : Optional[int] = mask_token
UpperCAmelCase : List[str] = eos_token
UpperCAmelCase : Optional[Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __magic_name__ ( self : Tuple, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : List[Any], __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : Optional[Any], **__A : Union[str, Any] ):
return text.split()
def __magic_name__ ( self : Optional[int], __A : Dict=False ):
return len(self._id_to_token )
def __magic_name__ ( self : int ):
return {token: i for i, token in enumerate(self.all_tokens )}
def __magic_name__ ( self : Tuple, __A : str ):
return self._token_to_id.get(__A, self._token_to_id.get(self.unk_token ) )
def __magic_name__ ( self : Any, __A : int ):
return self._id_to_token.get(__A, self.unk_token )
def __magic_name__ ( self : Union[str, Any], __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __magic_name__ ( self : Any, __A : List, __A : Optional[List] = None, __A : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase : Dict = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
mask += [0] * len(__A ) + [1]
return mask
def __magic_name__ ( self : Optional[int], __A : List[Any], __A : Dict ):
UpperCAmelCase : Union[str, Any] = os.path.join(__A, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__A, '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __magic_name__ ( self : Dict ):
return self.get_vocab_size(with_added_tokens=__A )
def __magic_name__ ( self : Optional[int], __A : Union[List[str], List[AddedToken]], __A : bool = False ):
return super()._add_tokens(__A, special_tokens=__A )
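# Illustration only: how build_inputs_with_special_tokens above composes ids.
# With toy ids cls=0 and eos=2, a single sequence [5, 6] becomes [0, 5, 6, 2]
# and a pair ([5, 6], [7]) becomes [0, 5, 6, 2, 7, 2].
_cls, _eos = [0], [2]
assert _cls + [5, 6] + _eos == [0, 5, 6, 2]
assert _cls + [5, 6] + _eos + [7] + _eos == [0, 5, 6, 2, 7, 2]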
| 336 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory( *objects ):
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size( exception : Exception ) -> bool:
    _statements = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def find_executable_batch_size( function : callable = None , starting_batch_size : int = 128 ):
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
                return function(batch_size , *args , **kwargs )
except Exception as e:
                if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
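# A minimal sketch of how find_executable_batch_size is meant to be used: the
# decorated function's first argument must be the batch size, which the wrapper
# halves whenever an out-of-memory error is caught. `model` and `dataloader`
# are placeholders for illustration only.
@find_executable_batch_size(starting_batch_size=128 )
def _train(batch_size , model , dataloader ):
    ...  # training loop that may raise a CUDA OOM error
# _train(model, dataloader) would then retry at 128, 64, 32, ... until it fits.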
| 336 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
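# A rough inference sketch mirroring the slow classification test above; the
# upstream (non-obfuscated) transformers class names are assumed here.
def _classify(image ):
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor
    processor = MobileNetV2ImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' )
    model = MobileNetV2ForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' )
    with torch.no_grad():
        logits = model(**processor(images=image , return_tensors='''pt''' ) ).logits
    return logits.argmax(-1 ).item()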
| 336 | 1 |
def hamming_distance( string_a : str , string_b : str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
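# Quick check: "karolin" vs "kathrin" is the textbook example and differs in
# exactly three positions.
assert hamming_distance('''karolin''' , '''kathrin''' ) == 3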
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """codegen"""
UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any, __A : Optional[int]=5_0_4_0_0, __A : Tuple=2_0_4_8, __A : Optional[int]=2_0_4_8, __A : List[str]=4_0_9_6, __A : List[str]=2_8, __A : Union[str, Any]=1_6, __A : Tuple=6_4, __A : Union[str, Any]=None, __A : Union[str, Any]="gelu_new", __A : Any=0.0, __A : Dict=0.0, __A : str=0.0, __A : Optional[int]=1E-5, __A : Any=0.0_2, __A : Any=True, __A : Union[str, Any]=5_0_2_5_6, __A : List[str]=5_0_2_5_6, __A : int=False, **__A : List[Any], ):
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_ctx
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : Union[str, Any] = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Tuple = n_inner
UpperCAmelCase : int = rotary_dim
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[str] = resid_pdrop
UpperCAmelCase : Optional[Any] = embd_pdrop
UpperCAmelCase : str = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : List[str] = eos_token_id
super().__init__(
bos_token_id=__A, eos_token_id=__A, tie_word_embeddings=__A, **__A )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Any, __A : PretrainedConfig, __A : str = "default", __A : List[PatchingSpec] = None, __A : bool = False, ):
super().__init__(__A, task=__A, patching_specs=__A, use_past=__A )
if not getattr(self._config, '''pad_token_id''', __A ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def __magic_name__ ( self : str ):
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__A, direction='''inputs''' )
UpperCAmelCase : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __magic_name__ ( self : Dict ):
return self._config.n_layer
@property
def __magic_name__ ( self : List[str] ):
return self._config.n_head
def __magic_name__ ( self : str, __A : PreTrainedTokenizer, __A : int = -1, __A : int = -1, __A : bool = False, __A : Optional[TensorType] = None, ):
UpperCAmelCase : Union[str, Any] = super(__A, self ).generate_dummy_inputs(
__A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase : str = seqlen + 2
UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase : Optional[int] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
UpperCAmelCase : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase : Optional[Any] = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase : Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__A, __A, dtype=__A )], dim=1 )
return ordered_inputs
@property
def __magic_name__ ( self : Tuple ):
return 1_3
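# Illustration of the attribute_map aliasing declared on the config above:
# reading `hidden_size` transparently resolves to `n_embd` (and likewise for
# the other mapped names). The upstream class name is assumed here.
from transformers import CodeGenConfig
_cfg = CodeGenConfig(n_embd=1_0_2_4 )
assert _cfg.hidden_size == 1_0_2_4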
| 336 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __UpperCAmelCase :
def __init__( self : str, __A : List[Any], ):
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : Optional[int] = 1_3
UpperCAmelCase : Dict = 7
UpperCAmelCase : Union[str, Any] = 3_0
UpperCAmelCase : Any = self.seq_length + self.mem_len
UpperCAmelCase : List[Any] = 1_5
UpperCAmelCase : Any = True
UpperCAmelCase : int = True
UpperCAmelCase : List[str] = 9_9
UpperCAmelCase : Optional[Any] = [1_0, 5_0, 8_0]
UpperCAmelCase : Optional[int] = 3_2
UpperCAmelCase : Optional[int] = 3_2
UpperCAmelCase : List[Any] = 4
UpperCAmelCase : Optional[Any] = 8
UpperCAmelCase : Tuple = 1_2_8
UpperCAmelCase : Dict = 2
UpperCAmelCase : List[str] = 2
UpperCAmelCase : Tuple = None
UpperCAmelCase : Dict = 1
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = self.vocab_size - 1
UpperCAmelCase : Dict = 0.0_1
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : Optional[Any] = TransfoXLConfig(
vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
return (config, input_ids_a, input_ids_a, lm_labels)
def __magic_name__ ( self : Union[str, Any] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __magic_name__ ( self : List[Any], __A : Optional[int], __A : List[Any], __A : Union[str, Any], __A : List[str] ):
UpperCAmelCase : Tuple = TFTransfoXLModel(__A )
UpperCAmelCase , UpperCAmelCase : Optional[int] = model(__A ).to_tuple()
UpperCAmelCase : List[str] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
UpperCAmelCase , UpperCAmelCase : Optional[int] = model(__A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
def __magic_name__ ( self : str, __A : List[str], __A : List[Any], __A : Tuple, __A : Dict ):
UpperCAmelCase : List[str] = TFTransfoXLLMHeadModel(__A )
UpperCAmelCase , UpperCAmelCase : int = model(__A ).to_tuple()
UpperCAmelCase : int = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
UpperCAmelCase , UpperCAmelCase : Optional[int] = model(__A ).to_tuple()
UpperCAmelCase , UpperCAmelCase : List[Any] = model([input_ids_a, mems_a] ).to_tuple()
UpperCAmelCase : Optional[Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
UpperCAmelCase , UpperCAmelCase : Any = model(__A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
def __magic_name__ ( self : Optional[int], __A : Union[str, Any], __A : Union[str, Any], __A : Tuple, __A : Any ):
UpperCAmelCase : List[str] = TFTransfoXLForSequenceClassification(__A )
UpperCAmelCase : List[str] = model(__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : List[Any] = config_and_inputs
UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase = () if is_tf_available() else ()
UpperCamelCase = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : str, __A : Optional[int], __A : Tuple, __A : Any, __A : List[str], __A : List[Any] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = TFTransfoXLModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self, config_class=__A, d_embed=3_7 )
def __magic_name__ ( self : Dict ):
self.config_tester.run_common_tests()
def __magic_name__ ( self : int ):
self.model_tester.set_seed()
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__A )
def __magic_name__ ( self : List[str] ):
self.model_tester.set_seed()
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__A )
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
assert isinstance(__A, tf.keras.layers.Layer )
UpperCAmelCase : Dict = model.get_bias()
assert name is None
else:
UpperCAmelCase : Tuple = model.get_output_embeddings()
assert x is None
UpperCAmelCase : str = model.get_bias()
assert name is None
def __magic_name__ ( self : str ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __magic_name__ ( self : Optional[Any] ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = TFTransfoXLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def __magic_name__ ( self : Optional[Any] ):
pass
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : str = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
UpperCAmelCase : Dict = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]], dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase : Dict = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase : Optional[Any] = model.generate(__A, max_length=2_0_0, do_sample=__A )
self.assertListEqual(output_ids[0].numpy().tolist(), __A )
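# A hedged sketch of the generation path the skipped test above covers
# (upstream names assumed; TransfoXL has since been deprecated in transformers).
def _generate_sample():
    from transformers import TFTransfoXLLMHeadModel
    model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
    input_ids = tf.convert_to_tensor([[3_3, 1_2_9_7, 2]] , dtype=tf.int32 )
    return model.generate(input_ids , max_length=2_0 , do_sample=False )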
| 336 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCAmelCase :
def __init__( self : List[Any], __A : List[str], __A : List[str]=1_3, __A : Any=6_4, __A : Optional[Any]=2, __A : str=3, __A : str=True, __A : str=True, __A : Optional[Any]=3_2, __A : List[str]=5, __A : int=4, __A : str=3_7, __A : str="gelu", __A : Dict=0.1, __A : List[Any]=0.1, __A : Dict=1_0, __A : int=0.0_2, __A : Any=[1, 1_6, 4, 4], __A : Optional[int]=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Any = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = scope
UpperCAmelCase : List[str] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase : str = (self.image_size // 3_2) ** 2
UpperCAmelCase : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ):
UpperCAmelCase : Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__A, )
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int, __A : Tuple ):
UpperCAmelCase : int = ViTHybridModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : Dict, __A : str, __A : List[str] ):
UpperCAmelCase : str = self.type_sequence_label_size
UpperCAmelCase : List[Any] = ViTHybridForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = ViTHybridModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : int ):
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, nn.Linear ) )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(__A )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = _config_zero_init(__A )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(config=__A )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase : Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def __magic_name__ ( self : List[str] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Tuple:
UpperCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
UpperCAmelCase : Tuple = self.default_image_processor
UpperCAmelCase : int = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**__A )
# verify the logits
UpperCAmelCase : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Optional[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
@require_accelerate
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase : int = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' )
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(images=__A, return_tensors='''pt''' )
UpperCAmelCase : Dict = model(**__A )
UpperCAmelCase : Any = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase : Dict = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
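# Sketch of the accelerate-dispatched load used in the last test; `device_map`
# requires the accelerate package, and the upstream class name is assumed.
def _load_vit_hybrid():
    from transformers import ViTHybridForImageClassification
    return ViTHybridForImageClassification.from_pretrained(
        '''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )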
| 336 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
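# Stand-alone illustration of the schedule built in set_timesteps above: a
# geometric interpolation between sigma_min**2 and sigma_max**2 evaluated at
# the reversed timesteps (values below are the config defaults).
_sigma_min, _sigma_max, _num_steps = 0.0_2, 1_0_0.0, 5_0
_timesteps = jnp.arange(0, _num_steps )[::-1]
_schedule = _sigma_max**2 * (_sigma_min**2 / _sigma_max**2) ** (_timesteps / (_num_steps - 1))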
| 336 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Any, __A : Any, __A : Optional[Any]=7, __A : Optional[Any]=3, __A : Optional[Any]=1_8, __A : Tuple=3_0, __A : Optional[Any]=4_0_0, __A : Any=True, __A : List[Any]=None, __A : Tuple=True, __A : Tuple=None, __A : Optional[int]=True, __A : Any=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], __A : int=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], __A : List[Any]=True, ):
UpperCAmelCase : List[Any] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCAmelCase : Any = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
UpperCAmelCase : int = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : List[Any] = num_channels
UpperCAmelCase : Optional[Any] = image_size
UpperCAmelCase : str = min_resolution
UpperCAmelCase : str = max_resolution
UpperCAmelCase : List[str] = do_resize
UpperCAmelCase : Union[str, Any] = size
UpperCAmelCase : Dict = do_center_crop
UpperCAmelCase : Tuple = crop_size
UpperCAmelCase : Dict = do_normalize
UpperCAmelCase : List[Any] = image_mean
UpperCAmelCase : Optional[Any] = image_std
UpperCAmelCase : List[Any] = do_convert_rgb
def __magic_name__ ( self : Tuple ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __magic_name__ ( self : List[Any], __A : Dict=False, __A : Optional[Any]=False, __A : List[str]=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
UpperCAmelCase : Any = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uinta ) )
else:
UpperCAmelCase : List[str] = []
for i in range(self.batch_size ):
UpperCAmelCase , UpperCAmelCase : int = np.random.choice(np.arange(self.min_resolution, self.max_resolution ), 2 )
image_inputs.append(np.random.randint(2_5_5, size=(self.num_channels, width, height), dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
UpperCAmelCase : Tuple = [Image.fromarray(np.moveaxis(__A, 0, -1 ) ) for x in image_inputs]
if torchify:
UpperCAmelCase : Optional[Any] = [torch.from_numpy(__A ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __magic_name__ ( self : int ):
UpperCAmelCase : str = ChineseCLIPImageProcessingTester(self, do_center_crop=__A )
@property
def __magic_name__ ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A, '''do_resize''' ) )
self.assertTrue(hasattr(__A, '''size''' ) )
self.assertTrue(hasattr(__A, '''do_center_crop''' ) )
self.assertTrue(hasattr(__A, '''center_crop''' ) )
self.assertTrue(hasattr(__A, '''do_normalize''' ) )
self.assertTrue(hasattr(__A, '''image_mean''' ) )
self.assertTrue(hasattr(__A, '''image_std''' ) )
self.assertTrue(hasattr(__A, '''do_convert_rgb''' ) )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''height''': 2_2_4, '''width''': 2_2_4} )
self.assertEqual(image_processor.crop_size, {'''height''': 1_8, '''width''': 1_8} )
UpperCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size, {'''height''': 8_4, '''width''': 8_4} )
def __magic_name__ ( self : Union[str, Any] ):
pass
def __magic_name__ ( self : Optional[Any] ):
# Initialize image_processing
UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A, Image.Image )
# Test not batched input
UpperCAmelCase : List[str] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : List[Any] = image_processing(__A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def __magic_name__ ( self : List[Any] ):
# Initialize image_processing
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : int = self.image_processor_tester.prepare_inputs(equal_resolution=__A, numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A, np.ndarray )
# Test not batched input
UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : Union[str, Any] = image_processing(__A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def __magic_name__ ( self : Dict ):
# Initialize image_processing
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__A, torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A, torch.Tensor )
# Test not batched input
UpperCAmelCase : List[str] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : str = image_processing(__A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
@require_torch
@require_vision
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Any = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=__A )
UpperCAmelCase : List[str] = 3
@property
def __magic_name__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A, '''do_resize''' ) )
self.assertTrue(hasattr(__A, '''size''' ) )
self.assertTrue(hasattr(__A, '''do_center_crop''' ) )
self.assertTrue(hasattr(__A, '''center_crop''' ) )
self.assertTrue(hasattr(__A, '''do_normalize''' ) )
self.assertTrue(hasattr(__A, '''image_mean''' ) )
self.assertTrue(hasattr(__A, '''image_std''' ) )
self.assertTrue(hasattr(__A, '''do_convert_rgb''' ) )
def __magic_name__ ( self : Optional[int] ):
pass
def __magic_name__ ( self : Any ):
# Initialize image_processing
UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A, Image.Image )
# Test not batched input
UpperCAmelCase : Tuple = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
UpperCAmelCase : Dict = image_processing(__A, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 336 |
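# The tests above all assert the same contract: the image processor returns a
# `pixel_values` tensor of shape (batch_size, num_channels, crop_height,
# crop_width) for PIL, numpy, and torch inputs alike. Stated as a sketch
# (names here are hypothetical, not the test harness's actual attributes):
# pixel_values = image_processing(images, return_tensors="pt").pixel_values
# assert pixel_values.shape == (batch_size, num_channels, crop_h, crop_w)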
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCAmelCase ( ctypes.Structure ):
# _fields_ is a specific attr expected by ctypes
UpperCamelCase = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def a__ ( ) -> Dict:
if os.name == "nt":
UpperCAmelCase : List[str] = CursorInfo()
UpperCAmelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def a__ ( ) -> Optional[int]:
if os.name == "nt":
UpperCAmelCase : int = CursorInfo()
UpperCAmelCase : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
UpperCAmelCase : Any = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def a__ ( ) -> Optional[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
| 336 | 1 |
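# The snippet above hides the terminal cursor for the duration of a context
# manager. A minimal POSIX-only sketch of the same pattern with readable
# names (the names are hypothetical; the escape sequences are the standard
# ANSI hide/show cursor codes used above):
import sys
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")  # hide the cursor
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")  # always restore the cursor
        sys.stdout.flush()

# usage: with hidden_cursor(): draw_progress()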
import cva
import numpy as np
class __UpperCAmelCase :
def __init__( self : int, __A : float, __A : int ):
if k in (0.0_4, 0.0_6):
UpperCAmelCase : Tuple = k
UpperCAmelCase : str = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self : Any ):
return str(self.k )
def __magic_name__ ( self : str, __A : str ):
UpperCAmelCase : Tuple = cva.imread(__A, 0 )
UpperCAmelCase , UpperCAmelCase : int = img.shape
UpperCAmelCase : list[list[int]] = []
UpperCAmelCase : int = img.copy()
UpperCAmelCase : Union[str, Any] = cva.cvtColor(__A, cva.COLOR_GRAY2RGB )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = np.gradient(__A )
UpperCAmelCase : int = dx**2
UpperCAmelCase : Dict = dy**2
UpperCAmelCase : str = dx * dy
UpperCAmelCase : Optional[int] = 0.0_4
UpperCAmelCase : str = self.window_size // 2
for y in range(__A, h - offset ):
for x in range(__A, w - offset ):
UpperCAmelCase : Any = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase : Optional[int] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase : str = (wxx * wyy) - (wxy**2)
UpperCAmelCase : Any = wxx + wyy
UpperCAmelCase : Optional[Any] = det - k * (trace**2)
# Threshold on the Harris response; 0.5 is a tunable cutoff
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = HarrisCorner(0.0_4, 3)
_lowerCamelCase , _lowerCamelCase : Any = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 336 |
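# The detector above scores each pixel with the Harris response
# R = det(M) - k * trace(M)^2, where M is built from windowed sums of the
# squared image gradients. A vectorized sketch of the same measure (function
# name is hypothetical; scipy's uniform_filter stands in for the window sums):
import numpy as np
from scipy.ndimage import uniform_filter

def harris_response(gray: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    dy, dx = np.gradient(gray.astype(float))
    ixx = uniform_filter(dx * dx, size=window)
    iyy = uniform_filter(dy * dy, size=window)
    ixy = uniform_filter(dx * dy, size=window)
    det = ixx * iyy - ixy**2   # det(M)
    trace = ixx + iyy          # trace(M)
    return det - k * trace**2  # large values indicate corners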
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 1 |
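# The __init__ above defers importing the heavy modeling code until an
# attribute is first accessed, via transformers' internal _LazyModule. A
# generic sketch of the same idea using PEP 562 module-level __getattr__
# (this is the standard-library pattern, not transformers' implementation):
import importlib

_lazy_map = {"EncodecFeatureExtractor": "feature_extraction_encodec"}

def __getattr__(name):
    if name in _lazy_map:
        submodule = importlib.import_module("." + _lazy_map[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")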
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def a__ ( *UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Union[Dict, Any]] = None , UpperCAmelCase : Dict=True , UpperCAmelCase : Any=2 ) -> Tuple:
from .. import __version__
UpperCAmelCase : Union[str, Any] = take_from
UpperCAmelCase : Optional[Any] = ()
if not isinstance(args[0] , UpperCAmelCase ):
UpperCAmelCase : Tuple = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(UpperCAmelCase ).base_version ) >= version.parse(UpperCAmelCase ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
UpperCAmelCase : Dict = None
if isinstance(UpperCAmelCase , UpperCAmelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(UpperCAmelCase ),)
UpperCAmelCase : Dict = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(UpperCAmelCase , UpperCAmelCase ):
values += (getattr(UpperCAmelCase , UpperCAmelCase ),)
UpperCAmelCase : str = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
UpperCAmelCase : List[str] = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
UpperCAmelCase : Optional[int] = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , UpperCAmelCase , stacklevel=UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) > 0:
UpperCAmelCase : Optional[Any] = inspect.getouterframes(inspect.currentframe() )[1]
UpperCAmelCase : List[str] = call_frame.filename
UpperCAmelCase : int = call_frame.lineno
UpperCAmelCase : Tuple = call_frame.function
UpperCAmelCase , UpperCAmelCase : List[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(UpperCAmelCase ) == 0:
return
elif len(UpperCAmelCase ) == 1:
return values[0]
return values
| 336 |
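# The helper above warns about deprecated arguments/attributes and can return
# the deprecated values. A minimal self-contained sketch of the core move
# (pop a deprecated kwarg, warn, return it; the name is hypothetical and this
# is not the full signature of the helper above):
import warnings

def pop_deprecated(kwargs: dict, name: str, removed_in: str):
    if name not in kwargs:
        return None
    warnings.warn(
        f"The `{name}` argument is deprecated and will be removed in version {removed_in}.",
        FutureWarning,
        stacklevel=2,
    )
    return kwargs.pop(name)

# usage: scale = pop_deprecated({"scale": 0.5}, "scale", "99.0.0")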
from __future__ import annotations
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not be greater than number_of_bytes!''' )
UpperCAmelCase : str = number_of_bytes // partitions
UpperCAmelCase : Dict = []
for i in range(UpperCAmelCase ):
UpperCAmelCase : int = i * bytes_per_partition + 1
UpperCAmelCase : Optional[int] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 1 |
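# Worked examples for the allocator above, computed by hand from the
# algorithm: each partition gets number_of_bytes // partitions bytes and the
# last partition absorbs any remainder. A readable re-statement for a quick
# sanity check (function name is hypothetical):
def allocate(number_of_bytes: int, partitions: int) -> list[str]:
    per = number_of_bytes // partitions
    return [
        f"{i * per + 1}-{number_of_bytes if i == partitions - 1 else (i + 1) * per}"
        for i in range(partitions)
    ]

assert allocate(16, 4) == ["1-4", "5-8", "9-12", "13-16"]
assert allocate(10, 3) == ["1-3", "4-6", "7-10"]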
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """upernet"""
def __init__( self : Any, __A : Any=None, __A : int=5_1_2, __A : List[str]=0.0_2, __A : Union[str, Any]=[1, 2, 3, 6], __A : Tuple=True, __A : Union[str, Any]=0.4, __A : List[str]=3_8_4, __A : str=2_5_6, __A : Union[str, Any]=1, __A : Tuple=False, __A : int=2_5_5, **__A : Any, ):
super().__init__(**__A )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCAmelCase : List[str] = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(__A, __A ):
UpperCAmelCase : List[Any] = backbone_config.get('''model_type''' )
UpperCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : List[str] = config_class.from_dict(__A )
UpperCAmelCase : Dict = backbone_config
UpperCAmelCase : str = hidden_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : List[str] = pool_scales
UpperCAmelCase : Optional[int] = use_auxiliary_head
UpperCAmelCase : str = auxiliary_loss_weight
UpperCAmelCase : Any = auxiliary_in_channels
UpperCAmelCase : List[str] = auxiliary_channels
UpperCAmelCase : Tuple = auxiliary_num_convs
UpperCAmelCase : int = auxiliary_concat_input
UpperCAmelCase : Optional[int] = loss_ignore_index
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Tuple = self.backbone_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 336 |
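# A hedged usage sketch for a config like the one above. In transformers,
# UperNetConfig accepts a nested backbone config; the ConvNext backbone and
# label count below are illustrative choices, not values from this file:
from transformers import ConvNextConfig, UperNetConfig

backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone, num_labels=150)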
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCamelCase : Union[str, Any] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def a__ ( UpperCAmelCase : Dict=None ) -> Optional[int]:
if subparsers is not None:
UpperCAmelCase : Tuple = subparsers.add_parser('''tpu-config''' , description=_description )
else:
UpperCAmelCase : Dict = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
UpperCAmelCase : Optional[int] = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=UpperCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=UpperCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
UpperCAmelCase : Union[str, Any] = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=UpperCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase : List[str] = defaults.commands
if not args.tpu_name:
UpperCAmelCase : Tuple = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase : int = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase : Tuple = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCAmelCase : Dict = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , UpperCAmelCase ):
UpperCAmelCase : Optional[int] = f'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
UpperCAmelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , UpperCAmelCase ):
UpperCAmelCase : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase : Optional[int] = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [f'''pip install {args.accelerate_version}''']
new_cmd += args.command
UpperCAmelCase : int = '''; '''.join(UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase : Any = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'''Running {" ".join(UpperCAmelCase )}''' )
return
subprocess.run(UpperCAmelCase )
print('''Successfully setup pod.''' )
def a__ ( ) -> Any:
UpperCAmelCase : Any = tpu_command_parser()
UpperCAmelCase : Tuple = parser.parse_args()
tpu_command_launcher(UpperCAmelCase )
| 336 | 1 |
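# The launcher above ends by shelling out to gcloud. A sketch of the command
# list it assembles (all values are illustrative placeholders):
cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh",
    "my-tpu",                   # --tpu_name
    "--zone", "us-central1-b",  # --tpu_zone
    "--command", "cd /usr/share; pip install accelerate -U; python train.py",
    "--worker", "all",
]
print(" ".join(cmd))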
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Any = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(UpperCAmelCase : Tuple , UpperCAmelCase : Any="" , UpperCAmelCase : Dict="." ):
UpperCAmelCase : List[str] = []
for k, v in d.items():
UpperCAmelCase : List[Any] = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase , UpperCAmelCase , sep=UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(UpperCAmelCase , '''r''' ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(UpperCAmelCase , Loader=yaml.FullLoader )
UpperCAmelCase : Optional[int] = flatten_yaml_as_dict(UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(UpperCAmelCase , str(UpperCAmelCase ) ) )
return config
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> List[Any]:
UpperCAmelCase : int = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase : Any = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : Any = 384
else:
UpperCAmelCase : Tuple = 256
UpperCAmelCase : int = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase : str = 384
else:
UpperCAmelCase : Dict = 256
UpperCAmelCase : List[Any] = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase : Optional[Any] = 151
UpperCAmelCase : Tuple = 512
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Tuple = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase : Dict = 21
UpperCAmelCase : str = 512
UpperCAmelCase : Union[str, Any] = '''pascal-voc-id2label.json'''
UpperCAmelCase : Dict = True
# orig_config
UpperCAmelCase : List[Any] = load_orig_config_file(UpperCAmelCase )
assert getattr(UpperCAmelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Tuple = getattr(UpperCAmelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(UpperCAmelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : int = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase : Any = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase : Any = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = dct.pop(UpperCAmelCase )
UpperCAmelCase : List[str] = val
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=False ) -> Union[str, Any]:
if base_model:
UpperCAmelCase : Dict = ''''''
else:
UpperCAmelCase : Dict = '''mobilevitv2.'''
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : List[str] = k[8:]
else:
UpperCAmelCase : Dict = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase : int = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
UpperCAmelCase : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : str = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : Dict = [0, 1]
elif i == 4:
UpperCAmelCase : Dict = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : int = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Any = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : List[Any] = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : Any = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase : Tuple = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase : Optional[int] = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : str = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = get_mobilevitva_config(UpperCAmelCase , UpperCAmelCase )
# load original state_dict
UpperCAmelCase : List[str] = torch.load(UpperCAmelCase , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase : str = MobileViTVaForSemanticSegmentation(UpperCAmelCase ).eval()
UpperCAmelCase : str = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(UpperCAmelCase ).eval()
UpperCAmelCase : Any = False
# remove unused keys from, and rename the remaining keys of, the original state dict
UpperCAmelCase : Optional[Any] = checkpoint
remove_unused_keys(UpperCAmelCase )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase , base_model=UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# load modified state_dict
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : int = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : str = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 336 | 1 |
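# The conversion above boils down to applying (old_key, new_key) pairs to the
# original state dict. The core move in isolation (keys are illustrative):
state_dict = {"conv_1.block.conv.weight": 0}
rename_pairs = [("conv_1.block.conv.weight", "mobilevitv2.conv_stem.convolution.weight")]
for src, dest in rename_pairs:
    state_dict[dest] = state_dict.pop(src)
assert "mobilevitv2.conv_stem.convolution.weight" in state_dict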
from __future__ import annotations
_lowerCamelCase : Optional[Any] = 1.60_21E-19 # units = C
def a__ ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''Mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
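# The solver above rearranges sigma = n * q * mu (conductivity = electron
# concentration * elementary charge * mobility). A worked example solving for
# mobility with illustrative values:
#   mu = sigma / (n * q) = 5.12 / (1e18 * 1.6021e-19) ~= 31.96 m^2/(V*s)
sigma, n, q = 5.12, 1e18, 1.6021e-19
print("mobility:", sigma / (n * q))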
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : List[Any] = model_class.__name__
UpperCAmelCase : Union[str, Any] = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( UpperCAmelCase : MutableMapping , UpperCAmelCase : str = "" , UpperCAmelCase : str = "." ) -> Union[str, Any]:
def _flatten_dict(UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="" , UpperCAmelCase : Any="." ):
for k, v in d.items():
UpperCAmelCase : List[str] = str(UpperCAmelCase ) + delimiter + str(UpperCAmelCase ) if parent_key else k
if v and isinstance(UpperCAmelCase , UpperCAmelCase ):
yield from flatten_dict(UpperCAmelCase , UpperCAmelCase , delimiter=UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
@contextmanager
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : bool = False ) -> Optional[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ) -> Optional[Any]:
if is_numpy_array(UpperCAmelCase ):
return np.transpose(UpperCAmelCase , axes=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.T if axes is None else array.permute(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(UpperCAmelCase , perm=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.transpose(UpperCAmelCase , axes=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : Optional[int] ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.reshape(*UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.reshape(UpperCAmelCase , UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=None ) -> Any:
if is_numpy_array(UpperCAmelCase ):
return np.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.squeeze(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
if is_numpy_array(UpperCAmelCase ):
return np.expand_dims(UpperCAmelCase , UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.unsqueeze(dim=UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return jnp.expand_dims(UpperCAmelCase , axis=UpperCAmelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_numpy_array(UpperCAmelCase ):
return np.size(UpperCAmelCase )
elif is_torch_tensor(UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(UpperCAmelCase ):
import tensorflow as tf
return tf.size(UpperCAmelCase )
elif is_jax_tensor(UpperCAmelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(UpperCAmelCase )}.''' )
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Dict:
for key, value in auto_map.items():
if isinstance(UpperCAmelCase , (tuple, list) ):
UpperCAmelCase : List[Any] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase : List[Any] = f'''{repo_id}--{value}'''
return auto_map
def a__ ( UpperCAmelCase : Tuple ) -> Union[str, Any]:
for base_class in inspect.getmro(UpperCAmelCase ):
UpperCAmelCase : Any = base_class.__module__
UpperCAmelCase : Dict = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 1 |
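# The helpers above expose one API over numpy/torch/tf/jax by sniffing the
# input type and deferring framework imports. The dispatch pattern in
# miniature (numpy/torch only; names are hypothetical):
import numpy as np

def generic_transpose(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    try:
        import torch
        if isinstance(array, torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise ValueError(f"Type not supported for transpose: {type(array)}.")

print(generic_transpose(np.arange(6).reshape(2, 3)).shape)  # (3, 2)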
import requests
_lowerCamelCase : List[str] = "YOUR API KEY"
def a__ ( UpperCAmelCase : str , UpperCAmelCase : str = giphy_api_key ) -> list:
UpperCAmelCase : Any = '''+'''.join(query.split() )
UpperCAmelCase : List[Any] = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
UpperCAmelCase : Any = requests.get(UpperCAmelCase ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 336 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
| 336 | 1 |
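# The test above pins down WordPiece behavior for the toy vocab: the input
# "UNwant\u00E9d,running" is lowercased and accent-stripped by the basic
# tokenizer, then segmented greedily into the longest matching vocab pieces:
#   tokens: ['un', '##want', '##ed', ',', 'runn', '##ing']
#   ids (positions in the toy vocab list): [7, 4, 5, 10, 8, 9]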